From afce0b7d144d8eb0cfccbff5761c973635e473d0 Mon Sep 17 00:00:00 2001 From: Sam Rijs Date: Tue, 25 Sep 2018 21:30:51 +1000 Subject: [PATCH 001/819] Add Json wrapper type --- postgres-shared/Cargo.toml | 3 +- postgres-shared/src/types/mod.rs | 3 ++ postgres-shared/src/types/serde_json.rs | 43 +++++++++++++++++++++---- 3 files changed, 42 insertions(+), 7 deletions(-) diff --git a/postgres-shared/Cargo.toml b/postgres-shared/Cargo.toml index 853a9f46e..795e95b45 100644 --- a/postgres-shared/Cargo.toml +++ b/postgres-shared/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/sfackler/rust-postgres" "with-chrono-0.4" = ["chrono"] "with-eui48-0.3" = ["eui48"] "with-geo-0.10" = ["geo"] -with-serde_json-1 = ["serde_json"] +with-serde_json-1 = ["serde", "serde_json"] "with-uuid-0.6" = ["uuid"] [dependencies] @@ -24,5 +24,6 @@ bit-vec = { version = "0.5", optional = true } chrono = { version = "0.4", optional = true } eui48 = { version = "0.3", optional = true } geo = { version = "0.10", optional = true } +serde = { version = "1.0", optional = true } serde_json = { version = "1.0", optional = true } uuid = { version = "0.6", optional = true } diff --git a/postgres-shared/src/types/mod.rs b/postgres-shared/src/types/mod.rs index 457248164..627720806 100644 --- a/postgres-shared/src/types/mod.rs +++ b/postgres-shared/src/types/mod.rs @@ -87,6 +87,9 @@ mod uuid; mod special; mod type_gen; +#[cfg(feature = "with-serde_json-1")] +pub use self::serde_json::Json; + /// A Postgres type. #[derive(PartialEq, Eq, Clone, Debug)] pub struct Type(Inner); diff --git a/postgres-shared/src/types/serde_json.rs b/postgres-shared/src/types/serde_json.rs index 53a63d587..9544f6d24 100644 --- a/postgres-shared/src/types/serde_json.rs +++ b/postgres-shared/src/types/serde_json.rs @@ -1,13 +1,22 @@ +extern crate serde; extern crate serde_json; +use self::serde::{Deserialize, Serialize}; use self::serde_json::Value; use std::error::Error; -use std::io::{Read, Write}; +use std::fmt::Debug; +use std::io::Read; use types::{FromSql, IsNull, ToSql, Type}; -impl<'a> FromSql<'a> for Value { - fn from_sql(ty: &Type, mut raw: &[u8]) -> Result> { +#[derive(Debug)] +pub struct Json(pub T); + +impl<'a, T> FromSql<'a> for Json +where + T: Deserialize<'a>, +{ + fn from_sql(ty: &Type, mut raw: &'a [u8]) -> Result, Box> { if *ty == Type::JSONB { let mut b = [0; 1]; raw.read_exact(&mut b)?; @@ -16,21 +25,43 @@ impl<'a> FromSql<'a> for Value { return Err("unsupported JSONB encoding version".into()); } } - serde_json::de::from_reader(raw).map_err(Into::into) + serde_json::de::from_slice(raw) + .map(Json) + .map_err(Into::into) } accepts!(JSON, JSONB); } -impl ToSql for Value { +impl ToSql for Json +where + T: Serialize + Debug, +{ fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { if *ty == Type::JSONB { out.push(1); } - write!(out, "{}", self)?; + serde_json::ser::to_writer(out, &self.0)?; Ok(IsNull::No) } accepts!(JSON, JSONB); to_sql_checked!(); } + +impl<'a> FromSql<'a> for Value { + fn from_sql(ty: &Type, raw: &[u8]) -> Result> { + Json::::from_sql(ty, raw).map(|json| json.0) + } + + accepts!(JSON, JSONB); +} + +impl ToSql for Value { + fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + Json(self).to_sql(ty, out) + } + + accepts!(JSON, JSONB); + to_sql_checked!(); +} From a7db65827959eeef51accf84acaf1507770b82bb Mon Sep 17 00:00:00 2001 From: Philipp Korber Date: Tue, 25 Sep 2018 15:21:31 +0200 Subject: [PATCH 002/819] Test partial rollback of nested commits. Related to #371. 
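The `Json` wrapper added in PATCH 001 lets any serde-serializable value be passed as a `JSON`/`JSONB` parameter or read back out of a row. Note that the rendering above drops angle-bracketed generics, so the declarations are really `pub struct Json<T>(pub T)`, with `T: Deserialize<'a>` required for `FromSql` and `T: Serialize + Debug` for `ToSql`. A minimal usage sketch against the blocking `postgres` API, assuming the wrapper is re-exported at `postgres::types::Json`, that the `with-serde_json-1` feature is enabled, and that a hypothetical `events` table with a `JSONB` column named `payload` exists (struct, table, and column names are illustrative only):

    extern crate postgres;
    #[macro_use]
    extern crate serde_derive;

    use postgres::types::Json;
    use postgres::{Connection, Error};

    #[derive(Debug, Serialize, Deserialize)]
    struct Payload {
        kind: String,
        count: i32,
    }

    fn roundtrip(conn: &Connection) -> Result<Payload, Error> {
        let event = Payload { kind: "click".to_owned(), count: 3 };
        // The ToSql impl serializes the wrapped value with serde_json, prepending
        // the version byte `1` when the target column is JSONB.
        conn.execute("INSERT INTO events (payload) VALUES ($1)", &[&Json(event)])?;

        // The FromSql impl strips and checks the JSONB version byte, then
        // deserializes the remaining bytes into the wrapped type.
        let rows = conn.query("SELECT payload FROM events LIMIT 1", &[])?;
        let Json(decoded): Json<Payload> = rows.get(0).get(0);
        Ok(decoded)
    }
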
---
 postgres/tests/test.rs | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/postgres/tests/test.rs b/postgres/tests/test.rs
index 13ef53e7b..753d9d9ad 100644
--- a/postgres/tests/test.rs
+++ b/postgres/tests/test.rs
@@ -331,6 +331,41 @@ fn test_nested_transactions_finish() {
     );
 }
 
+#[test]
+fn test_nested_transactions_partial_rollback() {
+    let conn = or_panic!(Connection::connect(
+        "postgres://postgres@localhost:5433",
+        TlsMode::None,
+    ));
+    or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[]));
+
+    or_panic!(conn.execute("INSERT INTO foo (id) VALUES ($1)", &[&1i32]));
+
+    {
+        let trans = or_panic!(conn.transaction());
+        or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&2i32]));
+        {
+            let trans = or_panic!(trans.transaction());
+            or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&3i32]));
+            {
+                let trans = or_panic!(trans.transaction());
+                or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&4i32]));
+                drop(trans);
+            }
+            drop(trans);
+        }
+        or_panic!(trans.commit());
+    }
+
+    let stmt = or_panic!(conn.prepare("SELECT * FROM foo ORDER BY id"));
+    let result = or_panic!(stmt.query(&[]));
+
+    assert_eq!(
+        vec![1i32, 2],
+        result.iter().map(|row| row.get(0)).collect::<Vec<i32>>()
+    );
+}
+
 #[test]
 #[should_panic(expected = "active transaction")]
 fn test_conn_trans_when_nested() {

From 96f97a6117a7b779a33a414ece8e333904839c46 Mon Sep 17 00:00:00 2001
From: Philipp Korber
Date: Tue, 25 Sep 2018 15:23:25 +0200
Subject: [PATCH 003/819] Fixes #371 by not reusing the same savepoint name.

Rolling back to a savepoint does not release it, so reusing the same
name for every nested transaction means an outer nested transaction only
rolls back to the most recently created (innermost) savepoint with that
name rather than to its own. If the outermost transaction committed after
multiple consecutive rollbacks in its nested transactions, this caused an
unexpected state in the database before the fix.
---
 postgres/src/transaction.rs | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs
index 60b1dcb06..1dfdc7d3c 100644
--- a/postgres/src/transaction.rs
+++ b/postgres/src/transaction.rs
@@ -244,7 +244,7 @@ impl<'conn> Transaction<'conn> {
     ///
     /// Panics if there is an active nested transaction.
     pub fn transaction<'a>(&'a self) -> Result<Transaction<'a>> {
-        self.savepoint("sp")
+        self.savepoint(format!("sp_{}", self.depth()))
     }
 
     /// Like `Connection::transaction`, but creates a nested transaction via
     /// a savepoint.
     ///
     /// # Panics
     ///
     /// Panics if there is an active nested transaction.
- pub fn savepoint<'a>(&'a self, name: &str) -> Result> { + #[inline] + pub fn savepoint<'a, I>(&'a self, name: I) -> Result> + where I: Into + { + self._savepoint(name.into()) + } + + fn _savepoint<'a>(&'a self, name: String) -> Result> { let mut conn = self.conn.0.borrow_mut(); check_desync!(conn); assert!( @@ -265,7 +272,7 @@ impl<'conn> Transaction<'conn> { Ok(Transaction { conn: self.conn, depth: self.depth + 1, - savepoint_name: Some(name.to_owned()), + savepoint_name: Some(name), commit: Cell::new(false), finished: false, }) From 9befbd898f7b45b726621bf71670e8bc83095483 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 11 Oct 2018 09:00:59 -0700 Subject: [PATCH 004/819] Upgrade digest crates --- postgres-protocol/Cargo.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 6ab4c0077..ef21a2273 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -12,10 +12,10 @@ base64 = "0.9" byteorder = "1.0" bytes = "0.4" fallible-iterator = "0.1" -generic-array = "0.11" -hmac = "0.6" -md5 = "0.3" +generic-array = "0.12" +hmac = "0.7" +md5 = "0.4" memchr = "2.0" rand = "0.5" -sha2 = "0.7" +sha2 = "0.8" stringprep = "0.1" From 05ef00a2a16e023483136f8646e123870cdeab62 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 14 Oct 2018 16:51:30 -0700 Subject: [PATCH 005/819] Upgrade md5 --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index ef21a2273..72194ac78 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -14,7 +14,7 @@ bytes = "0.4" fallible-iterator = "0.1" generic-array = "0.12" hmac = "0.7" -md5 = "0.4" +md5 = "0.5" memchr = "2.0" rand = "0.5" sha2 = "0.8" From 6f9b36a09a290ce8018a3945e4efeadd360a4ba1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 14 Oct 2018 16:57:32 -0700 Subject: [PATCH 006/819] Remove old benchmarks --- postgres/benches/bench.rs | 28 ---------------------------- 1 file changed, 28 deletions(-) delete mode 100644 postgres/benches/bench.rs diff --git a/postgres/benches/bench.rs b/postgres/benches/bench.rs deleted file mode 100644 index 64e214069..000000000 --- a/postgres/benches/bench.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![feature(test)] -extern crate test; -extern crate postgres; - -use postgres::{Connection, TlsMode}; - -#[bench] -fn bench_naiive_execute(b: &mut test::Bencher) { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.execute("CREATE TEMPORARY TABLE foo (id INT)", &[]) - .unwrap(); - - b.iter(|| { - let stmt = conn.prepare("UPDATE foo SET id = 1").unwrap(); - let out = stmt.execute(&[]).unwrap(); - stmt.finish().unwrap(); - out - }); -} - -#[bench] -fn bench_execute(b: &mut test::Bencher) { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.execute("CREATE TEMPORARY TABLE foo (id INT)", &[]) - .unwrap(); - - b.iter(|| conn.execute("UPDATE foo SET id = 1", &[]).unwrap()); -} From 255c758d41a0c84133fb3e114bf2a92895ceec7e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 14 Oct 2018 17:44:46 -0700 Subject: [PATCH 007/819] Add tokio-postgres-native-tls --- Cargo.toml | 1 + tokio-postgres-native-tls/Cargo.toml | 15 ++++ tokio-postgres-native-tls/src/lib.rs | 106 ++++++++++++++++++++++++++ tokio-postgres-native-tls/src/test.rs | 69 +++++++++++++++++ 4 files changed, 191 insertions(+) 
create mode 100644 tokio-postgres-native-tls/Cargo.toml create mode 100644 tokio-postgres-native-tls/src/lib.rs create mode 100644 tokio-postgres-native-tls/src/test.rs diff --git a/Cargo.toml b/Cargo.toml index d7a9186a8..54c0fbbed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,5 +7,6 @@ members = [ "postgres-openssl", "postgres-native-tls", "tokio-postgres", + "tokio-postgres-native-tls", "tokio-postgres-openssl", ] diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml new file mode 100644 index 000000000..5e9562023 --- /dev/null +++ b/tokio-postgres-native-tls/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "tokio-postgres-native-tls" +version = "0.1.0" +authors = ["Steven Fackler "] + +[dependencies] +bytes = "0.4" +futures = "0.1" +native-tls = "0.2" +tokio-io = "0.1" +tokio-tls = "0.2" +tokio-postgres = { version = "0.3", path = "../tokio-postgres" } + +[dev-dependencies] +tokio = "0.1.7" diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs new file mode 100644 index 000000000..dee06f974 --- /dev/null +++ b/tokio-postgres-native-tls/src/lib.rs @@ -0,0 +1,106 @@ +extern crate bytes; +extern crate futures; +extern crate native_tls; +extern crate tokio_io; +extern crate tokio_postgres; +extern crate tokio_tls; + +#[cfg(test)] +extern crate tokio; + +use bytes::{Buf, BufMut}; +use futures::{Future, Poll}; +use std::error::Error; +use std::io::{self, Read, Write}; +use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_postgres::tls::{Socket, TlsConnect, TlsStream}; + +#[cfg(test)] +mod test; + +pub struct TlsConnector { + connector: tokio_tls::TlsConnector, +} + +impl TlsConnector { + pub fn new() -> Result { + let connector = native_tls::TlsConnector::new()?; + Ok(TlsConnector::with_connector(connector)) + } + + pub fn with_connector(connector: native_tls::TlsConnector) -> TlsConnector { + TlsConnector { + connector: tokio_tls::TlsConnector::from(connector), + } + } +} + +impl TlsConnect for TlsConnector { + fn connect( + &self, + domain: &str, + socket: Socket, + ) -> Box, Error = Box> + Sync + Send> { + let f = self + .connector + .connect(domain, socket) + .map(|s| { + let s: Box = Box::new(SslStream(s)); + s + }).map_err(|e| { + let e: Box = Box::new(e); + e + }); + Box::new(f) + } +} + +struct SslStream(tokio_tls::TlsStream); + +impl Read for SslStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.0.read(buf) + } +} + +impl AsyncRead for SslStream { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.0.prepare_uninitialized_buffer(buf) + } + + fn read_buf(&mut self, buf: &mut B) -> Poll + where + B: BufMut, + { + self.0.read_buf(buf) + } +} + +impl Write for SslStream { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.0.flush() + } +} + +impl AsyncWrite for SslStream { + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.0.shutdown() + } + + fn write_buf(&mut self, buf: &mut B) -> Poll + where + B: Buf, + { + self.0.write_buf(buf) + } +} + +impl TlsStream for SslStream { + fn tls_server_end_point(&self) -> Option> { + self.0.get_ref().tls_server_end_point().unwrap_or(None) + } +} diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs new file mode 100644 index 000000000..a860729ab --- /dev/null +++ b/tokio-postgres-native-tls/src/test.rs @@ -0,0 +1,69 @@ +use futures::{Future, Stream}; +use native_tls::{self, Certificate}; +use 
tokio::runtime::current_thread::Runtime; +use tokio_postgres::{self, TlsMode}; + +use TlsConnector; + +fn smoke_test(url: &str, tls: TlsMode) { + let mut runtime = Runtime::new().unwrap(); + + let handshake = tokio_postgres::connect(url.parse().unwrap(), tls); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.handle().spawn(connection).unwrap(); + + let prepare = client.prepare("SELECT 1::INT4"); + let statement = runtime.block_on(prepare).unwrap(); + let select = client.query(&statement, &[]).collect().map(|rows| { + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); + }); + runtime.block_on(select).unwrap(); + + drop(statement); + drop(client); + runtime.run().unwrap(); +} + +#[test] +fn require() { + let connector = native_tls::TlsConnector::builder() + .add_root_certificate( + Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), + ).build() + .unwrap(); + let connector = TlsConnector::with_connector(connector); + smoke_test( + "postgres://ssl_user@localhost:5433/postgres", + TlsMode::Require(Box::new(connector)), + ); +} + +#[test] +fn prefer() { + let connector = native_tls::TlsConnector::builder() + .add_root_certificate( + Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), + ).build() + .unwrap(); + let connector = TlsConnector::with_connector(connector); + smoke_test( + "postgres://ssl_user@localhost:5433/postgres", + TlsMode::Prefer(Box::new(connector)), + ); +} + +#[test] +fn scram_user() { + let connector = native_tls::TlsConnector::builder() + .add_root_certificate( + Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), + ).build() + .unwrap(); + let connector = TlsConnector::with_connector(connector); + smoke_test( + "postgres://scram_user:password@localhost:5433/postgres", + TlsMode::Require(Box::new(connector)), + ); +} From 16d1a050e7a46e272fe41a9dfc0929f8523bb14a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 14 Oct 2018 17:50:21 -0700 Subject: [PATCH 008/819] Tweak docs --- tokio-postgres/src/tls.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index bcb497787..d141d0d96 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -62,7 +62,7 @@ pub trait TlsStream: 'static + Sync + Send + AsyncRead + AsyncWrite { /// Returns the data associated with the `tls-unique` channel binding type as described in /// [RFC 5929], if supported. /// - /// An implementation only needs to support one of this or `tls_server_end_point`. + /// An implementation only needs to support at most one of this or `tls_server_end_point`. /// /// [RFC 5929]: https://tools.ietf.org/html/rfc5929 fn tls_unique(&self) -> Option> { @@ -72,7 +72,7 @@ pub trait TlsStream: 'static + Sync + Send + AsyncRead + AsyncWrite { /// Returns the data associated with the `tls-server-end-point` channel binding type as /// described in [RFC 5929], if supported. /// - /// An implementation only needs to support one of this or `tls_unique`. + /// An implementation only needs to support at most one of this or `tls_unique`. 
/// /// [RFC 5929]: https://tools.ietf.org/html/rfc5929 fn tls_server_end_point(&self) -> Option> { From dbc6bf24e45c322163ba40db312828fc3d58ad0b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 22 Aug 2018 22:35:33 -0700 Subject: [PATCH 009/819] Un-lock phf Closes #369 (cherry picked from commit 440a596350d417926faa07a952a638685f1c1e7a) --- codegen/Cargo.toml | 2 +- postgres-shared/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index 2ce54a7c0..c49248844 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -phf_codegen = "=0.7.22" +phf_codegen = "0.7.22" regex = "0.1" marksman_escape = "0.1" linked-hash-map = "0.4" diff --git a/postgres-shared/Cargo.toml b/postgres-shared/Cargo.toml index 795e95b45..2bdeb1712 100644 --- a/postgres-shared/Cargo.toml +++ b/postgres-shared/Cargo.toml @@ -17,7 +17,7 @@ with-serde_json-1 = ["serde", "serde_json"] [dependencies] hex = "0.3" fallible-iterator = "0.1.3" -phf = "=0.7.22" +phf = "0.7.22" postgres-protocol = { version = "0.3", path = "../postgres-protocol" } bit-vec = { version = "0.5", optional = true } From 04bd98e7edb238511661b311c725a176012bca6a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 27 Oct 2018 16:31:51 -0700 Subject: [PATCH 010/819] Make TlsConnect Sync and Send Closes #382 --- tokio-postgres/src/tls.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index d141d0d96..95766118e 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -50,7 +50,7 @@ impl AsyncWrite for Socket { } } -pub trait TlsConnect { +pub trait TlsConnect: Sync + Send { fn connect( &self, domain: &str, From f3777bed7670bfbefdc29eda5ce3a6ffd19d8352 Mon Sep 17 00:00:00 2001 From: Darren Tsung Date: Tue, 6 Nov 2018 10:14:32 -0800 Subject: [PATCH 011/819] Add a TransactionBuilder struct The TransactionBuilder is useful when trying to create a Transaction around a future which takes ownership of the client. See doc comment for more details. --- tokio-postgres/src/lib.rs | 25 +++++++++++++++++- tokio-postgres/tests/test.rs | 49 ++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index a16dd15e6..0d918664d 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -116,7 +116,17 @@ impl Client { // FIXME error type? T::Error: From, { - Transaction(proto::TransactionFuture::new(self.0.clone(), future)) + self.transaction_builder().build(future) + } + + /// Creates a TransactionBuilder, which can later be used to create + /// a Transaction around a future. + /// + /// Use this when Client is moved into the future being built. + /// For example, when executing multiple statements that depend + /// on the previous statement's result. + pub fn transaction_builder(&self) -> TransactionBuilder { + TransactionBuilder(self.0.clone()) } pub fn batch_execute(&mut self, query: &str) -> BatchExecute { @@ -333,6 +343,19 @@ impl Row { } } +pub struct TransactionBuilder(proto::Client); + +impl TransactionBuilder { + pub fn build(self, future: T) -> Transaction + where + T: Future, + // FIXME error type? 
+ T::Error: From, + { + Transaction(proto::TransactionFuture::new(self.0, future)) + } +} + #[must_use = "futures do nothing unless polled"] pub struct Transaction(proto::TransactionFuture) where diff --git a/tokio-postgres/tests/test.rs b/tokio-postgres/tests/test.rs index 015a6b8e9..6ef5111ae 100644 --- a/tokio-postgres/tests/test.rs +++ b/tokio-postgres/tests/test.rs @@ -692,3 +692,52 @@ fn copy_out() { ).unwrap(); assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); } + +#[test] +fn transaction_builder_around_moved_client() { + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + let (mut client, connection) = runtime + .block_on(tokio_postgres::connect( + "postgres://postgres@localhost:5433".parse().unwrap(), + TlsMode::None, + )).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.handle().spawn(connection).unwrap(); + + let transaction_builder = client.transaction_builder(); + let work = future::lazy(move || { + let execute = + client.batch_execute( + "CREATE TEMPORARY TABLE transaction_foo ( + id SERIAL, + name TEXT + )"); + + execute.and_then(move |_| { + client + .prepare("INSERT INTO transaction_foo (name) VALUES ($1), ($2)") + .map(|statement| (client, statement)) + }) + }).and_then(|(mut client, statement)| { + client + .query(&statement, &[&"jim", &"joe"]) + .collect() + .map(|_res| client) + }); + + let transaction = transaction_builder.build(work); + let mut client = runtime.block_on(transaction).unwrap(); + + let data = runtime + .block_on( + client + .prepare("COPY transaction_foo TO STDOUT") + .and_then(|s| client.copy_out(&s, &[]).concat2()), + ).unwrap(); + assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); + + drop(client); + runtime.run().unwrap(); +} From 0e60d80d4b8f8fe43b358c4a034edf44c941373e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 25 Nov 2018 08:49:28 -0800 Subject: [PATCH 012/819] Parameterize Connection over the stream --- tokio-postgres/src/lib.rs | 19 +++++++++++++------ tokio-postgres/src/proto/connection.rs | 20 +++++++++++++------- tokio-postgres/src/proto/handshake.rs | 2 +- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index a16dd15e6..a269d33de 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -28,6 +28,7 @@ use postgres_shared::rows::RowIndex; use std::error::Error as StdError; use std::fmt; use std::sync::atomic::{AtomicUsize, Ordering}; +use tokio_io::{AsyncRead, AsyncWrite}; #[doc(inline)] pub use postgres_shared::stmt::Column; @@ -38,7 +39,7 @@ pub use postgres_shared::{CancelData, Notification}; use error::{DbError, Error}; use params::ConnectParams; -use tls::TlsConnect; +use tls::{TlsConnect, TlsStream}; use types::{FromSql, ToSql, Type}; pub mod error; @@ -125,9 +126,12 @@ impl Client { } #[must_use = "futures do nothing unless polled"] -pub struct Connection(proto::Connection); +pub struct Connection(proto::Connection); -impl Connection { +impl Connection +where + S: AsyncRead + AsyncWrite, +{ pub fn cancel_data(&self) -> CancelData { self.0.cancel_data() } @@ -141,7 +145,10 @@ impl Connection { } } -impl Future for Connection { +impl Future for Connection +where + S: AsyncRead + AsyncWrite, +{ type Item = (); type Error = Error; @@ -173,10 +180,10 @@ impl Future for CancelQuery { pub struct Handshake(proto::HandshakeFuture); impl Future for Handshake { - type Item = (Client, Connection); + type Item = (Client, Connection>); type Error = Error; - fn poll(&mut self) -> Poll<(Client, 
Connection), Error> { + fn poll(&mut self) -> Poll<(Client, Connection>), Error> { let (client, connection) = try_ready!(self.0.poll()); Ok(Async::Ready((Client(client), Connection(connection)))) diff --git a/tokio-postgres/src/proto/connection.rs b/tokio-postgres/src/proto/connection.rs index 562ec6ee3..2f56d191c 100644 --- a/tokio-postgres/src/proto/connection.rs +++ b/tokio-postgres/src/proto/connection.rs @@ -5,10 +5,10 @@ use postgres_protocol::message::frontend; use std::collections::{HashMap, VecDeque}; use std::io; use tokio_codec::Framed; +use tokio_io::{AsyncRead, AsyncWrite}; use proto::codec::PostgresCodec; use proto::copy_in::CopyInReceiver; -use tls::TlsStream; use {AsyncMessage, CancelData, Notification}; use {DbError, Error}; @@ -32,8 +32,8 @@ enum State { Closing, } -pub struct Connection { - stream: Framed, PostgresCodec>, +pub struct Connection { + stream: Framed, cancel_data: CancelData, parameters: HashMap, receiver: mpsc::UnboundedReceiver, @@ -43,13 +43,16 @@ pub struct Connection { state: State, } -impl Connection { +impl Connection +where + S: AsyncRead + AsyncWrite, +{ pub fn new( - stream: Framed, PostgresCodec>, + stream: Framed, cancel_data: CancelData, parameters: HashMap, receiver: mpsc::UnboundedReceiver, - ) -> Connection { + ) -> Connection { Connection { stream, cancel_data, @@ -295,7 +298,10 @@ impl Connection { } } -impl Future for Connection { +impl Future for Connection +where + S: AsyncRead + AsyncWrite, +{ type Item = (); type Error = Error; diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs index 119669f75..99938557c 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -61,7 +61,7 @@ pub enum Handshake { parameters: HashMap, }, #[state_machine_future(ready)] - Finished((Client, Connection)), + Finished((Client, Connection>)), #[state_machine_future(error)] Failed(Error), } From 08b4020534fc0775251b27bb0f415ea074b18cd4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 26 Nov 2018 22:45:14 -0800 Subject: [PATCH 013/819] Overhaul connection APIs * `Connection` is now parameterized over the stream type, which can be any `AsyncRead + AsyncWrite`. * The `TlsMode` enum is now a trait, and `NoTls`, `PreferTls`, and `RequireTls` are types implementing that trait. * The `TlsConnect` trait no longer involves trait objects, and returns channel binding info alongside the stream type rather than requiring the stream to implement an additional trait. * The `connect` free function and `ConnectParams` type is gone in favor of a `Builder` type. It takes a pre-connected stream rather than automatically opening a TCP or Unix socket connection. Notably, we no longer have any dependency on the Tokio runtime. We do use the `tokio-codec` and `tokio-io` crates, but those don't actually depend on mio/tokio-reactor/etc. This means we can work with other futures-based networking stacks. We will almost certainly add back a convenience API that offers something akin to the old logic to open a TCP/Unix connection automatically but that will be worked out in a follow up PR. 
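Concretely, the new shape means the caller opens its own stream and hands it to a `Builder` together with a TLS mode, and the returned `Connection` future is driven separately from the `Client`. A minimal sketch of an unencrypted connection on the tokio 0.1 runtime, assuming `NoTls` is the zero-sized mode named above for plaintext connections and reusing the `postgres` superuser on port 5433 that the tests in this series target:

    extern crate futures;
    extern crate tokio;
    extern crate tokio_postgres;

    use futures::Future;
    use tokio::net::TcpStream;
    use tokio_postgres::{Builder, NoTls};

    fn main() {
        let mut builder = Builder::new();
        builder.user("postgres");

        let connect = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap())
            .map_err(|e| panic!("tcp connect error: {}", e))
            .and_then(move |stream| {
                // The builder no longer opens a socket itself; it only drives the
                // PostgreSQL startup and authentication handshake over the stream
                // it is given, so any AsyncRead + AsyncWrite transport works.
                builder.connect(stream, NoTls)
            });

        tokio::run(
            connect
                .map(|(_client, connection)| {
                    // The connection future pumps the socket; it must be spawned for
                    // queries issued through the client to make progress. A real
                    // program would keep `_client` around and query through it.
                    tokio::spawn(connection.map_err(|e| eprintln!("connection error: {}", e)));
                })
                .map_err(|e| panic!("handshake error: {}", e)),
        );
    }
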
--- .circleci/config.yml | 2 +- docker/Dockerfile | 2 +- tokio-postgres-native-tls/Cargo.toml | 1 - tokio-postgres-native-tls/src/lib.rs | 103 ++---- tokio-postgres-native-tls/src/test.rs | 37 +- tokio-postgres-openssl/Cargo.toml | 3 +- tokio-postgres-openssl/src/lib.rs | 143 ++------ tokio-postgres-openssl/src/test.rs | 40 +- tokio-postgres/Cargo.toml | 7 +- tokio-postgres/src/builder.rs | 55 +++ tokio-postgres/src/error/mod.rs | 13 - tokio-postgres/src/lib.rs | 62 ++-- tokio-postgres/src/proto/cancel.rs | 43 ++- tokio-postgres/src/proto/connect.rs | 510 ++++++++++++++------------ tokio-postgres/src/proto/handshake.rs | 328 ----------------- tokio-postgres/src/proto/mod.rs | 7 +- tokio-postgres/src/proto/socket.rs | 84 ----- tokio-postgres/src/proto/tls.rs | 97 +++++ tokio-postgres/src/tls.rs | 269 ++++++++++++-- tokio-postgres/tests/test.rs | 209 +++++------ 20 files changed, 954 insertions(+), 1061 deletions(-) create mode 100644 tokio-postgres/src/builder.rs delete mode 100644 tokio-postgres/src/proto/handshake.rs delete mode 100644 tokio-postgres/src/proto/socket.rs create mode 100644 tokio-postgres/src/proto/tls.rs diff --git a/.circleci/config.yml b/.circleci/config.yml index d3770591d..f0f6b0066 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ version: 2 jobs: build: docker: - - image: rust:1.26.2 + - image: rust:1.30.1 environment: RUSTFLAGS: -D warnings - image: sfackler/rust-postgres-test:4 diff --git a/docker/Dockerfile b/docker/Dockerfile index 9e2642ba1..bd685d445 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,3 +1,3 @@ -FROM postgres:11-beta1 +FROM postgres:11 COPY sql_setup.sh /docker-entrypoint-initdb.d/ diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml index 5e9562023..dc4270e8a 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/tokio-postgres-native-tls/Cargo.toml @@ -4,7 +4,6 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -bytes = "0.4" futures = "0.1" native-tls = "0.2" tokio-io = "0.1" diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index dee06f974..d792ba013 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -1,106 +1,71 @@ -extern crate bytes; -extern crate futures; extern crate native_tls; extern crate tokio_io; extern crate tokio_postgres; extern crate tokio_tls; +#[macro_use] +extern crate futures; + #[cfg(test)] extern crate tokio; -use bytes::{Buf, BufMut}; -use futures::{Future, Poll}; -use std::error::Error; -use std::io::{self, Read, Write}; +use futures::{Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; -use tokio_postgres::tls::{Socket, TlsConnect, TlsStream}; +use tokio_postgres::{ChannelBinding, TlsConnect}; +use tokio_tls::{Connect, TlsStream}; #[cfg(test)] mod test; pub struct TlsConnector { connector: tokio_tls::TlsConnector, + domain: String, } impl TlsConnector { - pub fn new() -> Result { + pub fn new(domain: &str) -> Result { let connector = native_tls::TlsConnector::new()?; - Ok(TlsConnector::with_connector(connector)) + Ok(TlsConnector::with_connector(connector, domain)) } - pub fn with_connector(connector: native_tls::TlsConnector) -> TlsConnector { + pub fn with_connector(connector: native_tls::TlsConnector, domain: &str) -> TlsConnector { TlsConnector { connector: tokio_tls::TlsConnector::from(connector), + domain: domain.to_string(), } } } -impl TlsConnect for TlsConnector { - fn connect( - &self, - domain: &str, - socket: Socket, - ) -> 
Box, Error = Box> + Sync + Send> { - let f = self - .connector - .connect(domain, socket) - .map(|s| { - let s: Box = Box::new(SslStream(s)); - s - }).map_err(|e| { - let e: Box = Box::new(e); - e - }); - Box::new(f) - } -} - -struct SslStream(tokio_tls::TlsStream); - -impl Read for SslStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.0.read(buf) - } -} - -impl AsyncRead for SslStream { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - self.0.prepare_uninitialized_buffer(buf) - } +impl TlsConnect for TlsConnector +where + S: AsyncRead + AsyncWrite, +{ + type Stream = TlsStream; + type Error = native_tls::Error; + type Future = TlsConnectFuture; - fn read_buf(&mut self, buf: &mut B) -> Poll - where - B: BufMut, - { - self.0.read_buf(buf) + fn connect(self, stream: S) -> TlsConnectFuture { + TlsConnectFuture(self.connector.connect(&self.domain, stream)) } } -impl Write for SslStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.write(buf) - } +pub struct TlsConnectFuture(Connect); - fn flush(&mut self) -> io::Result<()> { - self.0.flush() - } -} +impl Future for TlsConnectFuture +where + S: AsyncRead + AsyncWrite, +{ + type Item = (TlsStream, ChannelBinding); + type Error = native_tls::Error; -impl AsyncWrite for SslStream { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.0.shutdown() - } + fn poll(&mut self) -> Poll<(TlsStream, ChannelBinding), native_tls::Error> { + let stream = try_ready!(self.0.poll()); + let mut channel_binding = ChannelBinding::new(); - fn write_buf(&mut self, buf: &mut B) -> Poll - where - B: Buf, - { - self.0.write_buf(buf) - } -} + if let Some(buf) = stream.get_ref().tls_server_end_point().unwrap_or(None) { + channel_binding = channel_binding.tls_server_end_point(buf); + } -impl TlsStream for SslStream { - fn tls_server_end_point(&self) -> Option> { - self.0.get_ref().tls_server_end_point().unwrap_or(None) + Ok(Async::Ready((stream, channel_binding))) } } diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index a860729ab..8d081efbc 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -1,17 +1,24 @@ use futures::{Future, Stream}; use native_tls::{self, Certificate}; +use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; -use tokio_postgres::{self, TlsMode}; +use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; use TlsConnector; -fn smoke_test(url: &str, tls: TlsMode) { +fn smoke_test(builder: &tokio_postgres::Builder, tls: T) +where + T: TlsMode, + T::Stream: 'static, +{ let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect(url.parse().unwrap(), tls); + let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + .map_err(|e| panic!("{}", e)) + .and_then(|s| builder.connect(s, tls)); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); + runtime.spawn(connection); let prepare = client.prepare("SELECT 1::INT4"); let statement = runtime.block_on(prepare).unwrap(); @@ -33,10 +40,11 @@ fn require() { Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), ).build() .unwrap(); - let connector = TlsConnector::with_connector(connector); smoke_test( - "postgres://ssl_user@localhost:5433/postgres", - TlsMode::Require(Box::new(connector)), + tokio_postgres::Builder::new() + .user("ssl_user") + .database("postgres"), + 
RequireTls(TlsConnector::with_connector(connector, "localhost")), ); } @@ -47,10 +55,11 @@ fn prefer() { Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), ).build() .unwrap(); - let connector = TlsConnector::with_connector(connector); smoke_test( - "postgres://ssl_user@localhost:5433/postgres", - TlsMode::Prefer(Box::new(connector)), + tokio_postgres::Builder::new() + .user("ssl_user") + .database("postgres"), + PreferTls(TlsConnector::with_connector(connector, "localhost")), ); } @@ -61,9 +70,11 @@ fn scram_user() { Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), ).build() .unwrap(); - let connector = TlsConnector::with_connector(connector); smoke_test( - "postgres://scram_user:password@localhost:5433/postgres", - TlsMode::Require(Box::new(connector)), + tokio_postgres::Builder::new() + .user("scram_user") + .password("password") + .database("postgres"), + RequireTls(TlsConnector::with_connector(connector, "localhost")), ); } diff --git a/tokio-postgres-openssl/Cargo.toml b/tokio-postgres-openssl/Cargo.toml index 955349200..a97beedf5 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/tokio-postgres-openssl/Cargo.toml @@ -4,11 +4,10 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -bytes = "0.4" futures = "0.1" openssl = "0.10" tokio-io = "0.1" -tokio-openssl = "0.2" +tokio-openssl = "0.3" tokio-postgres = { version = "0.3", path = "../tokio-postgres" } [dev-dependencies] diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index 3a77218de..3da7987c3 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -1,141 +1,76 @@ -extern crate bytes; -extern crate futures; extern crate openssl; extern crate tokio_io; extern crate tokio_openssl; extern crate tokio_postgres; +#[macro_use] +extern crate futures; + #[cfg(test)] extern crate tokio; -use bytes::{Buf, BufMut}; -use futures::{Future, IntoFuture, Poll}; -use openssl::error::ErrorStack; -use openssl::ssl::{ConnectConfiguration, SslConnector, SslMethod, SslRef}; -use std::error::Error; -use std::io::{self, Read, Write}; +use futures::{Async, Future, Poll}; +use openssl::ssl::{ConnectConfiguration, HandshakeError, SslRef}; +use std::fmt::Debug; use tokio_io::{AsyncRead, AsyncWrite}; -use tokio_openssl::ConnectConfigurationExt; -use tokio_postgres::tls::{Socket, TlsConnect, TlsStream}; +use tokio_openssl::{ConnectAsync, ConnectConfigurationExt, SslStream}; +use tokio_postgres::{ChannelBinding, TlsConnect}; #[cfg(test)] mod test; pub struct TlsConnector { - connector: SslConnector, - callback: Box Result<(), ErrorStack> + Sync + Send>, + ssl: ConnectConfiguration, + domain: String, } impl TlsConnector { - pub fn new() -> Result { - let connector = SslConnector::builder(SslMethod::tls())?.build(); - Ok(TlsConnector::with_connector(connector)) - } - - pub fn with_connector(connector: SslConnector) -> TlsConnector { + pub fn new(ssl: ConnectConfiguration, domain: &str) -> TlsConnector { TlsConnector { - connector, - callback: Box::new(|_| Ok(())), + ssl, + domain: domain.to_string(), } } - - pub fn set_callback(&mut self, f: F) - where - F: Fn(&mut ConnectConfiguration) -> Result<(), ErrorStack> + 'static + Sync + Send, - { - self.callback = Box::new(f); - } -} - -impl TlsConnect for TlsConnector { - fn connect( - &self, - domain: &str, - socket: Socket, - ) -> Box, Error = Box> + Sync + Send> { - let f = self - .connector - .configure() - .and_then(|mut ssl| (self.callback)(&mut ssl).map(|_| ssl)) - .map_err(|e| { - 
let e: Box = Box::new(e); - e - }) - .into_future() - .and_then({ - let domain = domain.to_string(); - move |ssl| { - ssl.connect_async(&domain, socket) - .map(|s| { - let s: Box = Box::new(SslStream(s)); - s - }) - .map_err(|e| { - let e: Box = Box::new(e); - e - }) - } - }); - Box::new(f) - } -} - -struct SslStream(tokio_openssl::SslStream); - -impl Read for SslStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.0.read(buf) - } } -impl AsyncRead for SslStream { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - self.0.prepare_uninitialized_buffer(buf) - } +impl TlsConnect for TlsConnector +where + S: AsyncRead + AsyncWrite + Debug + 'static + Sync + Send, +{ + type Stream = SslStream; + type Error = HandshakeError; + type Future = TlsConnectFuture; - fn read_buf(&mut self, buf: &mut B) -> Poll - where - B: BufMut, - { - self.0.read_buf(buf) + fn connect(self, stream: S) -> TlsConnectFuture { + TlsConnectFuture(self.ssl.connect_async(&self.domain, stream)) } } -impl Write for SslStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.write(buf) - } +pub struct TlsConnectFuture(ConnectAsync); - fn flush(&mut self) -> io::Result<()> { - self.0.flush() - } -} - -impl AsyncWrite for SslStream { - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.0.shutdown() - } +impl Future for TlsConnectFuture +where + S: AsyncRead + AsyncWrite + Debug + 'static + Sync + Send, +{ + type Item = (SslStream, ChannelBinding); + type Error = HandshakeError; - fn write_buf(&mut self, buf: &mut B) -> Poll - where - B: Buf, - { - self.0.write_buf(buf) - } -} + fn poll(&mut self) -> Poll<(SslStream, ChannelBinding), HandshakeError> { + let stream = try_ready!(self.0.poll()); -impl TlsStream for SslStream { - fn tls_unique(&self) -> Option> { - let f = if self.0.get_ref().ssl().session_reused() { + let f = if stream.get_ref().ssl().session_reused() { SslRef::peer_finished } else { SslRef::finished }; - let len = f(self.0.get_ref().ssl(), &mut []); - let mut buf = vec![0; len]; - f(self.0.get_ref().ssl(), &mut buf); + let len = f(stream.get_ref().ssl(), &mut []); + let mut tls_unique = vec![0; len]; + f(stream.get_ref().ssl(), &mut tls_unique); - Some(buf) + Ok(Async::Ready(( + stream, + ChannelBinding::new().tls_unique(tls_unique), + ))) } } diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index f5999d148..7347a2425 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -1,17 +1,24 @@ use futures::{Future, Stream}; use openssl::ssl::{SslConnector, SslMethod}; +use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; -use tokio_postgres::{self, TlsMode}; +use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; use TlsConnector; -fn smoke_test(url: &str, tls: TlsMode) { +fn smoke_test(builder: &tokio_postgres::Builder, tls: T) +where + T: TlsMode, + T::Stream: 'static, +{ let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect(url.parse().unwrap(), tls); + let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + .map_err(|e| panic!("{}", e)) + .and_then(|s| builder.connect(s, tls)); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); + runtime.spawn(connection); let prepare = client.prepare("SELECT 1::INT4"); let statement = runtime.block_on(prepare).unwrap(); @@ -30,10 +37,12 @@ fn smoke_test(url: &str, 
tls: TlsMode) { fn require() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); builder.set_ca_file("../test/server.crt").unwrap(); - let connector = TlsConnector::with_connector(builder.build()); + let ctx = builder.build(); smoke_test( - "postgres://ssl_user@localhost:5433/postgres", - TlsMode::Require(Box::new(connector)), + tokio_postgres::Builder::new() + .user("ssl_user") + .database("postgres"), + RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } @@ -41,10 +50,12 @@ fn require() { fn prefer() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); builder.set_ca_file("../test/server.crt").unwrap(); - let connector = TlsConnector::with_connector(builder.build()); + let ctx = builder.build(); smoke_test( - "postgres://ssl_user@localhost:5433/postgres", - TlsMode::Prefer(Box::new(connector)), + tokio_postgres::Builder::new() + .user("ssl_user") + .database("postgres"), + PreferTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } @@ -52,9 +63,12 @@ fn prefer() { fn scram_user() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); builder.set_ca_file("../test/server.crt").unwrap(); - let connector = TlsConnector::with_connector(builder.build()); + let ctx = builder.build(); smoke_test( - "postgres://scram_user:password@localhost:5433/postgres", - TlsMode::Require(Box::new(connector)), + tokio_postgres::Builder::new() + .user("scram_user") + .password("password") + .database("postgres"), + RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f41b69051..84a3a9bac 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -37,7 +37,6 @@ bytes = "0.4" fallible-iterator = "0.1.3" futures = "0.1.7" futures-cpupool = "0.1" -lazy_static = "1.0" log = "0.4" phf = "=0.7.22" postgres-protocol = { version = "0.3.0", path = "../postgres-protocol" } @@ -45,11 +44,7 @@ postgres-shared = { version = "0.4.0", path = "../postgres-shared" } state_machine_future = "0.1.7" tokio-codec = "0.1" tokio-io = "0.1" -tokio-tcp = "0.1" -tokio-timer = "0.2" - -[target.'cfg(unix)'.dependencies] -tokio-uds = "0.2.1" +void = "1.0" [dev-dependencies] tokio = "0.1.7" diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs new file mode 100644 index 000000000..d7436c2b6 --- /dev/null +++ b/tokio-postgres/src/builder.rs @@ -0,0 +1,55 @@ +use std::collections::HashMap; +use tokio_io::{AsyncRead, AsyncWrite}; + +use proto::ConnectFuture; +use {Connect, TlsMode}; + +#[derive(Clone)] +pub struct Builder { + params: HashMap, + password: Option, +} + +impl Builder { + pub fn new() -> Builder { + let mut params = HashMap::new(); + params.insert("client_encoding".to_string(), "UTF8".to_string()); + params.insert("timezone".to_string(), "GMT".to_string()); + + Builder { + params, + password: None, + } + } + + pub fn user(&mut self, user: &str) -> &mut Builder { + self.param("user", user) + } + + pub fn database(&mut self, database: &str) -> &mut Builder { + self.param("database", database) + } + + pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { + self.params.insert(key.to_string(), value.to_string()); + self + } + + pub fn password(&mut self, password: &str) -> &mut Builder { + self.password = Some(password.to_string()); + self + } + + pub fn connect(&self, stream: S, tls_mode: T) -> Connect + where + S: AsyncRead + AsyncWrite, + T: TlsMode, + { + Connect(ConnectFuture::new( + stream, + tls_mode, + 
self.password.clone(), + self.params.clone(), + )) + } +} diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index de35c9b1a..642dfa463 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -5,7 +5,6 @@ use postgres_protocol::message::backend::{ErrorFields, ErrorResponseBody}; use std::error; use std::fmt; use std::io; -use tokio_timer; pub use self::sqlstate::*; @@ -348,8 +347,6 @@ enum Kind { MissingUser, MissingPassword, UnsupportedAuthentication, - Connect, - Timer, Authentication, } @@ -396,8 +393,6 @@ impl error::Error for Error { Kind::MissingUser => "username not provided", Kind::MissingPassword => "password not provided", Kind::UnsupportedAuthentication => "unsupported authentication method requested", - Kind::Connect => "error connecting to server", - Kind::Timer => "timer error", Kind::Authentication => "authentication error", } } @@ -489,14 +484,6 @@ impl Error { Error::new(Kind::Tls, Some(e)) } - pub(crate) fn connect(e: io::Error) -> Error { - Error::new(Kind::Connect, Some(Box::new(e))) - } - - pub(crate) fn timer(e: tokio_timer::Error) -> Error { - Error::new(Kind::Timer, Some(Box::new(e))) - } - pub(crate) fn io(e: io::Error) -> Error { Error::new(Kind::Io, Some(Box::new(e))) } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index a269d33de..c8e3e00dd 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -7,21 +7,15 @@ extern crate postgres_protocol; extern crate postgres_shared; extern crate tokio_codec; extern crate tokio_io; -extern crate tokio_tcp; -extern crate tokio_timer; +extern crate void; #[macro_use] extern crate futures; #[macro_use] -extern crate lazy_static; -#[macro_use] extern crate log; #[macro_use] extern crate state_machine_future; -#[cfg(unix)] -extern crate tokio_uds; - use bytes::Bytes; use futures::{Async, Future, Poll, Stream}; use postgres_shared::rows::RowIndex; @@ -37,14 +31,16 @@ pub use postgres_shared::{params, types}; #[doc(inline)] pub use postgres_shared::{CancelData, Notification}; -use error::{DbError, Error}; -use params::ConnectParams; -use tls::{TlsConnect, TlsStream}; +pub use builder::*; +pub use error::*; +use proto::CancelFuture; +pub use tls::*; use types::{FromSql, ToSql, Type}; +mod builder; pub mod error; mod proto; -pub mod tls; +mod tls; fn next_statement() -> String { static ID: AtomicUsize = AtomicUsize::new(0); @@ -56,18 +52,12 @@ fn next_portal() -> String { format!("p{}", ID.fetch_add(1, Ordering::SeqCst)) } -pub enum TlsMode { - None, - Prefer(Box), - Require(Box), -} - -pub fn cancel_query(params: ConnectParams, tls: TlsMode, cancel_data: CancelData) -> CancelQuery { - CancelQuery(proto::CancelFuture::new(params, tls, cancel_data)) -} - -pub fn connect(params: ConnectParams, tls: TlsMode) -> Handshake { - Handshake(proto::HandshakeFuture::new(params, tls)) +pub fn cancel_query(stream: S, tls_mode: T, cancel_data: CancelData) -> CancelQuery +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ + CancelQuery(CancelFuture::new(stream, tls_mode, cancel_data)) } pub struct Client(proto::Client); @@ -165,9 +155,16 @@ pub enum AsyncMessage { } #[must_use = "futures do nothing unless polled"] -pub struct CancelQuery(proto::CancelFuture); +pub struct CancelQuery(proto::CancelFuture) +where + S: AsyncRead + AsyncWrite, + T: TlsMode; -impl Future for CancelQuery { +impl Future for CancelQuery +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ type Item = (); type Error = Error; @@ -177,13 +174,20 @@ impl Future for CancelQuery { } 
#[must_use = "futures do nothing unless polled"] -pub struct Handshake(proto::HandshakeFuture); +pub struct Connect(proto::ConnectFuture) +where + S: AsyncRead + AsyncWrite, + T: TlsMode; -impl Future for Handshake { - type Item = (Client, Connection>); +impl Future for Connect +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ + type Item = (Client, Connection); type Error = Error; - fn poll(&mut self) -> Poll<(Client, Connection>), Error> { + fn poll(&mut self) -> Poll<(Client, Connection), Error> { let (client, connection) = try_ready!(self.0.poll()); Ok(Async::Ready((Client(client), Connection(connection)))) diff --git a/tokio-postgres/src/proto/cancel.rs b/tokio-postgres/src/proto/cancel.rs index 138fb9bb6..0a5e2492e 100644 --- a/tokio-postgres/src/proto/cancel.rs +++ b/tokio-postgres/src/proto/cancel.rs @@ -2,35 +2,42 @@ use futures::{Future, Poll}; use postgres_protocol::message::frontend; use state_machine_future::RentToOwn; use tokio_io::io::{self, Flush, WriteAll}; +use tokio_io::{AsyncRead, AsyncWrite}; use error::Error; -use params::ConnectParams; -use proto::connect::ConnectFuture; -use tls::TlsStream; +use proto::TlsFuture; use {CancelData, TlsMode}; #[derive(StateMachineFuture)] -pub enum Cancel { +pub enum Cancel +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ #[state_machine_future(start, transitions(SendingCancel))] Start { - future: ConnectFuture, + future: TlsFuture, cancel_data: CancelData, }, #[state_machine_future(transitions(FlushingCancel))] SendingCancel { - future: WriteAll, Vec>, + future: WriteAll>, }, #[state_machine_future(transitions(Finished))] - FlushingCancel { future: Flush> }, + FlushingCancel { future: Flush }, #[state_machine_future(ready)] Finished(()), #[state_machine_future(error)] Failed(Error), } -impl PollCancel for Cancel { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let stream = try_ready!(state.future.poll()); +impl PollCancel for Cancel +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ + fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { + let (stream, _) = try_ready!(state.future.poll()); let mut buf = vec![]; frontend::cancel_request( @@ -45,8 +52,8 @@ impl PollCancel for Cancel { } fn poll_sending_cancel<'a>( - state: &'a mut RentToOwn<'a, SendingCancel>, - ) -> Poll { + state: &'a mut RentToOwn<'a, SendingCancel>, + ) -> Poll, Error> { let (stream, _) = try_ready_closed!(state.future.poll()); transition!(FlushingCancel { @@ -55,15 +62,19 @@ impl PollCancel for Cancel { } fn poll_flushing_cancel<'a>( - state: &'a mut RentToOwn<'a, FlushingCancel>, + state: &'a mut RentToOwn<'a, FlushingCancel>, ) -> Poll { try_ready_closed!(state.future.poll()); transition!(Finished(())) } } -impl CancelFuture { - pub fn new(params: ConnectParams, mode: TlsMode, cancel_data: CancelData) -> CancelFuture { - Cancel::start(ConnectFuture::new(params, mode), cancel_data) +impl CancelFuture +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ + pub fn new(stream: S, tls_mode: T, cancel_data: CancelData) -> CancelFuture { + Cancel::start(TlsFuture::new(stream, tls_mode), cancel_data) } } diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index 00da8117e..659be30a2 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -1,288 +1,330 @@ -use futures::{Async, Future, Poll}; -use futures_cpupool::{CpuFuture, CpuPool}; +use fallible_iterator::FallibleIterator; +use futures::sink; +use futures::sync::mpsc; +use futures::{Future, Poll, 
Sink, Stream}; +use postgres_protocol::authentication; +use postgres_protocol::authentication::sasl::{self, ScramSha256}; +use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use state_machine_future::RentToOwn; -use std::error::Error as StdError; +use std::collections::HashMap; use std::io; -use std::net::{SocketAddr, ToSocketAddrs}; -use std::time::{Duration, Instant}; -use std::vec; -use tokio_io::io::{read_exact, write_all, ReadExact, WriteAll}; -use tokio_tcp::{self, TcpStream}; -use tokio_timer::Delay; +use tokio_codec::Framed; +use tokio_io::{AsyncRead, AsyncWrite}; -#[cfg(unix)] -use tokio_uds::{self, UnixStream}; - -use params::{ConnectParams, Host}; -use proto::socket::Socket; -use tls::{self, TlsConnect, TlsStream}; -use {Error, TlsMode}; - -lazy_static! { - static ref DNS_POOL: CpuPool = CpuPool::new(2); -} +use proto::{Client, Connection, PostgresCodec, TlsFuture}; +use {CancelData, ChannelBinding, Error, TlsMode}; #[derive(StateMachineFuture)] -pub enum Connect { - #[state_machine_future(start)] - #[cfg_attr( - unix, - state_machine_future(transitions(ResolvingDns, ConnectingUnix)) - )] - #[cfg_attr(not(unix), state_machine_future(transitions(ResolvingDns)))] - Start { params: ConnectParams, tls: TlsMode }, - #[state_machine_future(transitions(ConnectingTcp))] - ResolvingDns { - future: CpuFuture, io::Error>, - timeout: Option, - params: ConnectParams, - tls: TlsMode, +pub enum Connect +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ + #[state_machine_future(start, transitions(SendingStartup))] + Start { + future: TlsFuture, + password: Option, + params: HashMap, + }, + #[state_machine_future(transitions(ReadingAuth))] + SendingStartup { + future: sink::Send>, + user: String, + password: Option, + channel_binding: ChannelBinding, }, - #[state_machine_future(transitions(PreparingSsl))] - ConnectingTcp { - addrs: vec::IntoIter, - future: tokio_tcp::ConnectFuture, - timeout: Option<(Duration, Delay)>, - params: ConnectParams, - tls: TlsMode, + #[state_machine_future(transitions(ReadingInfo, SendingPassword, SendingSasl))] + ReadingAuth { + stream: Framed, + user: String, + password: Option, + channel_binding: ChannelBinding, }, - #[cfg(unix)] - #[state_machine_future(transitions(PreparingSsl))] - ConnectingUnix { - future: tokio_uds::ConnectFuture, - timeout: Option, - params: ConnectParams, - tls: TlsMode, + #[state_machine_future(transitions(ReadingAuthCompletion))] + SendingPassword { + future: sink::Send>, }, - #[state_machine_future(transitions(Ready, SendingSsl))] - PreparingSsl { - socket: Socket, - params: ConnectParams, - tls: TlsMode, + #[state_machine_future(transitions(ReadingSasl))] + SendingSasl { + future: sink::Send>, + scram: ScramSha256, }, - #[state_machine_future(transitions(ReadingSsl))] - SendingSsl { - future: WriteAll>, - params: ConnectParams, - connector: Box, - required: bool, + #[state_machine_future(transitions(SendingSasl, ReadingAuthCompletion))] + ReadingSasl { + stream: Framed, + scram: ScramSha256, }, - #[state_machine_future(transitions(ConnectingTls, Ready))] - ReadingSsl { - future: ReadExact, - params: ConnectParams, - connector: Box, - required: bool, + #[state_machine_future(transitions(ReadingInfo))] + ReadingAuthCompletion { + stream: Framed, }, - #[state_machine_future(transitions(Ready))] - ConnectingTls { - future: - Box, Error = Box> + Sync + Send>, - params: ConnectParams, + #[state_machine_future(transitions(Finished))] + ReadingInfo { + stream: Framed, + cancel_data: Option, + 
parameters: HashMap, }, #[state_machine_future(ready)] - Ready(Box), + Finished((Client, Connection)), #[state_machine_future(error)] Failed(Error), } -impl PollConnect for Connect { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); +impl PollConnect for Connect +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ + fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { + let (stream, channel_binding) = try_ready!(state.future.poll()); + let mut state = state.take(); - let timeout = state.params.connect_timeout(); - let port = state.params.port(); + let mut buf = vec![]; + frontend::startup_message(state.params.iter().map(|(k, v)| (&**k, &**v)), &mut buf) + .map_err(Error::encode)?; - match state.params.host().clone() { - Host::Tcp(host) => transition!(ResolvingDns { - future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()), - params: state.params, - tls: state.tls, - timeout, - }), - #[cfg(unix)] - Host::Unix(mut path) => { - path.push(format!(".s.PGSQL.{}", port)); - transition!(ConnectingUnix { - future: UnixStream::connect(path), - timeout: timeout.map(|t| Delay::new(Instant::now() + t)), - params: state.params, - tls: state.tls, - }) - }, - #[cfg(not(unix))] - Host::Unix(_) => { - Err(Error::connect(io::Error::new( - io::ErrorKind::Other, - "unix sockets are not supported on this platform", - ))) - }, - } - } + let stream = Framed::new(stream, PostgresCodec); - fn poll_resolving_dns<'a>( - state: &'a mut RentToOwn<'a, ResolvingDns>, - ) -> Poll { - let mut addrs = try_ready!(state.future.poll().map_err(Error::connect)); - let state = state.take(); + let user = state + .params + .remove("user") + .ok_or_else(Error::missing_user)?; - let addr = match addrs.next() { - Some(addr) => addr, - None => { - return Err(Error::connect(io::Error::new( - io::ErrorKind::Other, - "resolved to 0 addresses", - ))) - } - }; + transition!(SendingStartup { + future: stream.send(buf), + user, + password: state.password, + channel_binding, + }) + } - transition!(ConnectingTcp { - addrs, - future: TcpStream::connect(&addr), - timeout: state.timeout.map(|t| (t, Delay::new(Instant::now() + t))), - params: state.params, - tls: state.tls, + fn poll_sending_startup<'a>( + state: &'a mut RentToOwn<'a, SendingStartup>, + ) -> Poll, Error> { + let stream = try_ready!(state.future.poll().map_err(Error::io)); + let state = state.take(); + transition!(ReadingAuth { + stream, + user: state.user, + password: state.password, + channel_binding: state.channel_binding, }) } - fn poll_connecting_tcp<'a>( - state: &'a mut RentToOwn<'a, ConnectingTcp>, - ) -> Poll { - let socket = loop { - let error = match state.future.poll() { - Ok(Async::Ready(socket)) => break socket, - Ok(Async::NotReady) => match state.timeout { - Some((_, ref mut delay)) => { - try_ready!(delay.poll().map_err(Error::timer)); - io::Error::new(io::ErrorKind::TimedOut, "connection timed out") + fn poll_reading_auth<'a>( + state: &'a mut RentToOwn<'a, ReadingAuth>, + ) -> Poll, Error> { + let message = try_ready!(state.stream.poll().map_err(Error::io)); + let state = state.take(); + + match message { + Some(Message::AuthenticationOk) => transition!(ReadingInfo { + stream: state.stream, + cancel_data: None, + parameters: HashMap::new(), + }), + Some(Message::AuthenticationCleartextPassword) => { + let pass = state.password.ok_or_else(Error::missing_password)?; + let mut buf = vec![]; + frontend::password_message(&pass, &mut buf).map_err(Error::encode)?; + transition!(SendingPassword { 
+ future: state.stream.send(buf) + }) + } + Some(Message::AuthenticationMd5Password(body)) => { + let pass = state.password.ok_or_else(Error::missing_password)?; + let output = + authentication::md5_hash(state.user.as_bytes(), pass.as_bytes(), body.salt()); + let mut buf = vec![]; + frontend::password_message(&output, &mut buf).map_err(Error::encode)?; + transition!(SendingPassword { + future: state.stream.send(buf) + }) + } + Some(Message::AuthenticationSasl(body)) => { + let pass = state.password.ok_or_else(Error::missing_password)?; + + let mut has_scram = false; + let mut has_scram_plus = false; + let mut mechanisms = body.mechanisms(); + while let Some(mechanism) = mechanisms.next().map_err(Error::parse)? { + match mechanism { + sasl::SCRAM_SHA_256 => has_scram = true, + sasl::SCRAM_SHA_256_PLUS => has_scram_plus = true, + _ => {} } - None => return Ok(Async::NotReady), - }, - Err(e) => e, - }; + } - let addr = match state.addrs.next() { - Some(addr) => addr, - None => return Err(Error::connect(error)), - }; + let channel_binding = if let Some(tls_unique) = state.channel_binding.tls_unique { + Some(sasl::ChannelBinding::tls_unique(tls_unique)) + } else if let Some(tls_server_end_point) = + state.channel_binding.tls_server_end_point + { + Some(sasl::ChannelBinding::tls_server_end_point( + tls_server_end_point, + )) + } else { + None + }; - state.future = TcpStream::connect(&addr); - if let Some((timeout, ref mut delay)) = state.timeout { - delay.reset(Instant::now() + timeout); - } - }; + let (channel_binding, mechanism) = if has_scram_plus { + match channel_binding { + Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS), + None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), + } + } else if has_scram { + match channel_binding { + Some(_) => (sasl::ChannelBinding::unrequested(), sasl::SCRAM_SHA_256), + None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), + } + } else { + return Err(Error::unsupported_authentication()); + }; - // Our read/write patterns may trigger Nagle's algorithm since we're pipelining which - // we don't want. Each individual write should be a full command we want the backend to - // see immediately. - socket.set_nodelay(true).map_err(Error::connect)?; + let mut scram = ScramSha256::new(pass.as_bytes(), channel_binding); - let state = state.take(); - transition!(PreparingSsl { - socket: Socket::Tcp(socket), - params: state.params, - tls: state.tls, - }) - } + let mut buf = vec![]; + frontend::sasl_initial_response(mechanism, scram.message(), &mut buf) + .map_err(Error::encode)?; - #[cfg(unix)] - fn poll_connecting_unix<'a>( - state: &'a mut RentToOwn<'a, ConnectingUnix>, - ) -> Poll { - match state.future.poll().map_err(Error::connect)? 
{ - Async::Ready(socket) => { - let state = state.take(); - transition!(PreparingSsl { - socket: Socket::Unix(socket), - params: state.params, - tls: state.tls, + transition!(SendingSasl { + future: state.stream.send(buf), + scram, }) } - Async::NotReady => match state.timeout { - Some(ref mut delay) => { - try_ready!(delay.poll().map_err(Error::timer)); - Err(Error::connect(io::Error::new( - io::ErrorKind::TimedOut, - "connection timed out", - ))) - } - None => Ok(Async::NotReady), - }, + Some(Message::AuthenticationKerberosV5) + | Some(Message::AuthenticationScmCredential) + | Some(Message::AuthenticationGss) + | Some(Message::AuthenticationSspi) => Err(Error::unsupported_authentication()), + Some(Message::ErrorResponse(body)) => Err(Error::db(body)), + Some(_) => Err(Error::unexpected_message()), + None => Err(Error::closed()), } } - fn poll_preparing_ssl<'a>( - state: &'a mut RentToOwn<'a, PreparingSsl>, - ) -> Poll { - let state = state.take(); - - let (connector, required) = match state.tls { - TlsMode::None => { - transition!(Ready(Box::new(state.socket))); - } - TlsMode::Prefer(connector) => (connector, false), - TlsMode::Require(connector) => (connector, true), - }; - - let mut buf = vec![]; - frontend::ssl_request(&mut buf); - transition!(SendingSsl { - future: write_all(state.socket, buf), - params: state.params, - connector, - required, - }) + fn poll_sending_password<'a>( + state: &'a mut RentToOwn<'a, SendingPassword>, + ) -> Poll, Error> { + let stream = try_ready!(state.future.poll().map_err(Error::io)); + transition!(ReadingAuthCompletion { stream }) } - fn poll_sending_ssl<'a>( - state: &'a mut RentToOwn<'a, SendingSsl>, - ) -> Poll { - let (stream, _) = try_ready_closed!(state.future.poll()); + fn poll_sending_sasl<'a>( + state: &'a mut RentToOwn<'a, SendingSasl>, + ) -> Poll, Error> { + let stream = try_ready!(state.future.poll().map_err(Error::io)); let state = state.take(); - transition!(ReadingSsl { - future: read_exact(stream, [0]), - params: state.params, - connector: state.connector, - required: state.required, + transition!(ReadingSasl { + stream, + scram: state.scram, }) } - fn poll_reading_ssl<'a>( - state: &'a mut RentToOwn<'a, ReadingSsl>, - ) -> Poll { - let (stream, buf) = try_ready_closed!(state.future.poll()); - let state = state.take(); + fn poll_reading_sasl<'a>( + state: &'a mut RentToOwn<'a, ReadingSasl>, + ) -> Poll, Error> { + let message = try_ready!(state.stream.poll().map_err(Error::io)); + let mut state = state.take(); - match buf[0] { - b'S' => { - let future = match state.params.host() { - Host::Tcp(domain) => state.connector.connect(domain, tls::Socket(stream)), - Host::Unix(_) => { - return Err(Error::tls("TLS over unix sockets not supported".into())) - } - }; - transition!(ConnectingTls { - future, - params: state.params, + match message { + Some(Message::AuthenticationSaslContinue(body)) => { + state + .scram + .update(body.data()) + .map_err(Error::authentication)?; + let mut buf = vec![]; + frontend::sasl_response(state.scram.message(), &mut buf).map_err(Error::encode)?; + transition!(SendingSasl { + future: state.stream.send(buf), + scram: state.scram, }) } - b'N' if !state.required => transition!(Ready(Box::new(stream))), - b'N' => Err(Error::tls("TLS was required but not supported".into())), - _ => Err(Error::unexpected_message()), + Some(Message::AuthenticationSaslFinal(body)) => { + state + .scram + .finish(body.data()) + .map_err(Error::authentication)?; + transition!(ReadingAuthCompletion { + stream: state.stream + }) + } + 
Some(Message::ErrorResponse(body)) => Err(Error::db(body)), + Some(_) => Err(Error::unexpected_message()), + None => Err(Error::closed()), } } - fn poll_connecting_tls<'a>( - state: &'a mut RentToOwn<'a, ConnectingTls>, - ) -> Poll { - let stream = try_ready!(state.future.poll().map_err(Error::tls)); - transition!(Ready(stream)) + fn poll_reading_auth_completion<'a>( + state: &'a mut RentToOwn<'a, ReadingAuthCompletion>, + ) -> Poll, Error> { + let message = try_ready!(state.stream.poll().map_err(Error::io)); + let state = state.take(); + + match message { + Some(Message::AuthenticationOk) => transition!(ReadingInfo { + stream: state.stream, + cancel_data: None, + parameters: HashMap::new() + }), + Some(Message::ErrorResponse(body)) => Err(Error::db(body)), + Some(_) => Err(Error::unexpected_message()), + None => Err(Error::closed()), + } + } + + fn poll_reading_info<'a>( + state: &'a mut RentToOwn<'a, ReadingInfo>, + ) -> Poll, Error> { + loop { + let message = try_ready!(state.stream.poll().map_err(Error::io)); + match message { + Some(Message::BackendKeyData(body)) => { + state.cancel_data = Some(CancelData { + process_id: body.process_id(), + secret_key: body.secret_key(), + }); + } + Some(Message::ParameterStatus(body)) => { + state.parameters.insert( + body.name().map_err(Error::parse)?.to_string(), + body.value().map_err(Error::parse)?.to_string(), + ); + } + Some(Message::ReadyForQuery(_)) => { + let state = state.take(); + let cancel_data = state.cancel_data.ok_or_else(|| { + Error::parse(io::Error::new( + io::ErrorKind::InvalidData, + "BackendKeyData message missing", + )) + })?; + let (sender, receiver) = mpsc::unbounded(); + let client = Client::new(sender); + let connection = + Connection::new(state.stream, cancel_data, state.parameters, receiver); + transition!(Finished((client, connection))) + } + Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), + Some(Message::NoticeResponse(_)) => {} + Some(_) => return Err(Error::unexpected_message()), + None => return Err(Error::closed()), + } + } } } -impl ConnectFuture { - pub fn new(params: ConnectParams, tls: TlsMode) -> ConnectFuture { - Connect::start(params, tls) +impl ConnectFuture +where + S: AsyncRead + AsyncWrite, + T: TlsMode, +{ + pub fn new( + stream: S, + tls_mode: T, + password: Option, + params: HashMap, + ) -> ConnectFuture { + Connect::start(TlsFuture::new(stream, tls_mode), password, params) } } diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs deleted file mode 100644 index 99938557c..000000000 --- a/tokio-postgres/src/proto/handshake.rs +++ /dev/null @@ -1,328 +0,0 @@ -use fallible_iterator::FallibleIterator; -use futures::sink; -use futures::sync::mpsc; -use futures::{Future, Poll, Sink, Stream}; -use postgres_protocol::authentication; -use postgres_protocol::authentication::sasl::{self, ChannelBinding, ScramSha256}; -use postgres_protocol::message::backend::Message; -use postgres_protocol::message::frontend; -use state_machine_future::RentToOwn; -use std::collections::HashMap; -use std::io; -use tokio_codec::Framed; - -use params::{ConnectParams, User}; -use proto::client::Client; -use proto::codec::PostgresCodec; -use proto::connect::ConnectFuture; -use proto::connection::Connection; -use tls::TlsStream; -use {CancelData, Error, TlsMode}; - -#[derive(StateMachineFuture)] -pub enum Handshake { - #[state_machine_future(start, transitions(SendingStartup))] - Start { - future: ConnectFuture, - params: ConnectParams, - }, - 
#[state_machine_future(transitions(ReadingAuth))] - SendingStartup { - future: sink::Send, PostgresCodec>>, - user: User, - }, - #[state_machine_future(transitions(ReadingInfo, SendingPassword, SendingSasl))] - ReadingAuth { - stream: Framed, PostgresCodec>, - user: User, - }, - #[state_machine_future(transitions(ReadingAuthCompletion))] - SendingPassword { - future: sink::Send, PostgresCodec>>, - }, - #[state_machine_future(transitions(ReadingSasl))] - SendingSasl { - future: sink::Send, PostgresCodec>>, - scram: ScramSha256, - }, - #[state_machine_future(transitions(SendingSasl, ReadingAuthCompletion))] - ReadingSasl { - stream: Framed, PostgresCodec>, - scram: ScramSha256, - }, - #[state_machine_future(transitions(ReadingInfo))] - ReadingAuthCompletion { - stream: Framed, PostgresCodec>, - }, - #[state_machine_future(transitions(Finished))] - ReadingInfo { - stream: Framed, PostgresCodec>, - cancel_data: Option, - parameters: HashMap, - }, - #[state_machine_future(ready)] - Finished((Client, Connection>)), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollHandshake for Handshake { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let stream = try_ready!(state.future.poll()); - let state = state.take(); - - let user = match state.params.user() { - Some(user) => user.clone(), - None => return Err(Error::missing_user()), - }; - - let mut buf = vec![]; - { - let options = state - .params - .options() - .iter() - .map(|&(ref key, ref value)| (&**key, &**value)); - let client_encoding = Some(("client_encoding", "UTF8")); - let timezone = Some(("timezone", "GMT")); - let user = Some(("user", user.name())); - let database = state.params.database().map(|s| ("database", s)); - - frontend::startup_message( - options - .chain(client_encoding) - .chain(timezone) - .chain(user) - .chain(database), - &mut buf, - ).map_err(Error::encode)?; - } - - let stream = Framed::new(stream, PostgresCodec); - transition!(SendingStartup { - future: stream.send(buf), - user, - }) - } - - fn poll_sending_startup<'a>( - state: &'a mut RentToOwn<'a, SendingStartup>, - ) -> Poll { - let stream = try_ready!(state.future.poll().map_err(Error::io)); - let state = state.take(); - transition!(ReadingAuth { - stream, - user: state.user, - }) - } - - fn poll_reading_auth<'a>( - state: &'a mut RentToOwn<'a, ReadingAuth>, - ) -> Poll { - let message = try_ready!(state.stream.poll().map_err(Error::io)); - let state = state.take(); - - match message { - Some(Message::AuthenticationOk) => transition!(ReadingInfo { - stream: state.stream, - cancel_data: None, - parameters: HashMap::new(), - }), - Some(Message::AuthenticationCleartextPassword) => { - let pass = state.user.password().ok_or_else(Error::missing_password)?; - let mut buf = vec![]; - frontend::password_message(pass, &mut buf).map_err(Error::encode)?; - transition!(SendingPassword { - future: state.stream.send(buf) - }) - } - Some(Message::AuthenticationMd5Password(body)) => { - let pass = state.user.password().ok_or_else(Error::missing_password)?; - let output = authentication::md5_hash( - state.user.name().as_bytes(), - pass.as_bytes(), - body.salt(), - ); - let mut buf = vec![]; - frontend::password_message(&output, &mut buf).map_err(Error::encode)?; - transition!(SendingPassword { - future: state.stream.send(buf) - }) - } - Some(Message::AuthenticationSasl(body)) => { - let pass = state.user.password().ok_or_else(Error::missing_password)?; - - let mut has_scram = false; - let mut has_scram_plus = false; - let mut mechanisms = 
body.mechanisms(); - while let Some(mechanism) = mechanisms.next().map_err(Error::parse)? { - match mechanism { - sasl::SCRAM_SHA_256 => has_scram = true, - sasl::SCRAM_SHA_256_PLUS => has_scram_plus = true, - _ => {} - } - } - let channel_binding = state - .stream - .get_ref() - .tls_unique() - .map(ChannelBinding::tls_unique) - .or_else(|| { - state - .stream - .get_ref() - .tls_server_end_point() - .map(ChannelBinding::tls_server_end_point) - }); - - let (channel_binding, mechanism) = if has_scram_plus { - match channel_binding { - Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS), - None => (ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), - } - } else if has_scram { - match channel_binding { - Some(_) => (ChannelBinding::unrequested(), sasl::SCRAM_SHA_256), - None => (ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), - } - } else { - return Err(Error::unsupported_authentication()); - }; - - let mut scram = ScramSha256::new(pass.as_bytes(), channel_binding); - - let mut buf = vec![]; - frontend::sasl_initial_response(mechanism, scram.message(), &mut buf) - .map_err(Error::encode)?; - - transition!(SendingSasl { - future: state.stream.send(buf), - scram, - }) - } - Some(Message::AuthenticationKerberosV5) - | Some(Message::AuthenticationScmCredential) - | Some(Message::AuthenticationGss) - | Some(Message::AuthenticationSspi) => Err(Error::unsupported_authentication()), - Some(Message::ErrorResponse(body)) => Err(Error::db(body)), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } - - fn poll_sending_password<'a>( - state: &'a mut RentToOwn<'a, SendingPassword>, - ) -> Poll { - let stream = try_ready!(state.future.poll().map_err(Error::io)); - transition!(ReadingAuthCompletion { stream }) - } - - fn poll_sending_sasl<'a>( - state: &'a mut RentToOwn<'a, SendingSasl>, - ) -> Poll { - let stream = try_ready!(state.future.poll().map_err(Error::io)); - let state = state.take(); - transition!(ReadingSasl { - stream, - scram: state.scram - }) - } - - fn poll_reading_sasl<'a>( - state: &'a mut RentToOwn<'a, ReadingSasl>, - ) -> Poll { - let message = try_ready!(state.stream.poll().map_err(Error::io)); - let mut state = state.take(); - - match message { - Some(Message::AuthenticationSaslContinue(body)) => { - state - .scram - .update(body.data()) - .map_err(Error::authentication)?; - let mut buf = vec![]; - frontend::sasl_response(state.scram.message(), &mut buf).map_err(Error::encode)?; - transition!(SendingSasl { - future: state.stream.send(buf), - scram: state.scram, - }) - } - Some(Message::AuthenticationSaslFinal(body)) => { - state - .scram - .finish(body.data()) - .map_err(Error::authentication)?; - transition!(ReadingAuthCompletion { - stream: state.stream, - }) - } - Some(Message::ErrorResponse(body)) => Err(Error::db(body)), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } - - fn poll_reading_auth_completion<'a>( - state: &'a mut RentToOwn<'a, ReadingAuthCompletion>, - ) -> Poll { - let message = try_ready!(state.stream.poll().map_err(Error::io)); - let state = state.take(); - - match message { - Some(Message::AuthenticationOk) => transition!(ReadingInfo { - stream: state.stream, - cancel_data: None, - parameters: HashMap::new(), - }), - Some(Message::ErrorResponse(body)) => Err(Error::db(body)), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } - - fn poll_reading_info<'a>( - state: &'a mut RentToOwn<'a, ReadingInfo>, - ) -> Poll { - loop { - let 
message = try_ready!(state.stream.poll().map_err(Error::io)); - match message { - Some(Message::BackendKeyData(body)) => { - state.cancel_data = Some(CancelData { - process_id: body.process_id(), - secret_key: body.secret_key(), - }); - } - Some(Message::ParameterStatus(body)) => { - state.parameters.insert( - body.name().map_err(Error::parse)?.to_string(), - body.value().map_err(Error::parse)?.to_string(), - ); - } - Some(Message::ReadyForQuery(_)) => { - let state = state.take(); - let cancel_data = state.cancel_data.ok_or_else(|| { - Error::parse(io::Error::new( - io::ErrorKind::InvalidData, - "BackendKeyData message missing", - )) - })?; - let (sender, receiver) = mpsc::unbounded(); - let client = Client::new(sender); - let connection = - Connection::new(state.stream, cancel_data, state.parameters, receiver); - transition!(Finished((client, connection))) - } - Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), - Some(Message::NoticeResponse(_)) => {} - Some(_) => return Err(Error::unexpected_message()), - None => return Err(Error::closed()), - } - } - } -} - -impl HandshakeFuture { - pub fn new(params: ConnectParams, tls: TlsMode) -> HandshakeFuture { - Handshake::start(ConnectFuture::new(params.clone(), tls), params) - } -} diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 6471badfa..7cf5512cc 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -27,14 +27,13 @@ mod connection; mod copy_in; mod copy_out; mod execute; -mod handshake; mod portal; mod prepare; mod query; mod row; mod simple_query; -mod socket; mod statement; +mod tls; mod transaction; mod typeinfo; mod typeinfo_composite; @@ -44,16 +43,16 @@ pub use proto::bind::BindFuture; pub use proto::cancel::CancelFuture; pub use proto::client::Client; pub use proto::codec::PostgresCodec; +pub use proto::connect::ConnectFuture; pub use proto::connection::Connection; pub use proto::copy_in::CopyInFuture; pub use proto::copy_out::CopyOutStream; pub use proto::execute::ExecuteFuture; -pub use proto::handshake::HandshakeFuture; pub use proto::portal::Portal; pub use proto::prepare::PrepareFuture; pub use proto::query::QueryStream; pub use proto::row::Row; pub use proto::simple_query::SimpleQueryFuture; -pub use proto::socket::Socket; pub use proto::statement::Statement; +pub use proto::tls::TlsFuture; pub use proto::transaction::TransactionFuture; diff --git a/tokio-postgres/src/proto/socket.rs b/tokio-postgres/src/proto/socket.rs deleted file mode 100644 index f6de498ae..000000000 --- a/tokio-postgres/src/proto/socket.rs +++ /dev/null @@ -1,84 +0,0 @@ -use bytes::{Buf, BufMut}; -use futures::Poll; -use std::io::{self, Read, Write}; -use tokio_io::{AsyncRead, AsyncWrite}; -use tokio_tcp::TcpStream; - -#[cfg(unix)] -use tokio_uds::UnixStream; - -pub enum Socket { - Tcp(TcpStream), - #[cfg(unix)] - Unix(UnixStream), -} - -impl Read for Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match self { - Socket::Tcp(stream) => stream.read(buf), - #[cfg(unix)] - Socket::Unix(stream) => stream.read(buf), - } - } -} - -impl AsyncRead for Socket { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - match self { - Socket::Tcp(stream) => stream.prepare_uninitialized_buffer(buf), - #[cfg(unix)] - Socket::Unix(stream) => stream.prepare_uninitialized_buffer(buf), - } - } - - fn read_buf(&mut self, buf: &mut B) -> Poll - where - B: BufMut, - { - match self { - Socket::Tcp(stream) => stream.read_buf(buf), - #[cfg(unix)] - 
Socket::Unix(stream) => stream.read_buf(buf), - } - } -} - -impl Write for Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - match self { - Socket::Tcp(stream) => stream.write(buf), - #[cfg(unix)] - Socket::Unix(stream) => stream.write(buf), - } - } - - fn flush(&mut self) -> io::Result<()> { - match self { - Socket::Tcp(stream) => stream.flush(), - #[cfg(unix)] - Socket::Unix(stream) => stream.flush(), - } - } -} - -impl AsyncWrite for Socket { - fn shutdown(&mut self) -> Poll<(), io::Error> { - match self { - Socket::Tcp(stream) => stream.shutdown(), - #[cfg(unix)] - Socket::Unix(stream) => stream.shutdown(), - } - } - - fn write_buf(&mut self, buf: &mut B) -> Poll - where - B: Buf, - { - match self { - Socket::Tcp(stream) => stream.write_buf(buf), - #[cfg(unix)] - Socket::Unix(stream) => stream.write_buf(buf), - } - } -} diff --git a/tokio-postgres/src/proto/tls.rs b/tokio-postgres/src/proto/tls.rs new file mode 100644 index 000000000..a24394056 --- /dev/null +++ b/tokio-postgres/src/proto/tls.rs @@ -0,0 +1,97 @@ +use futures::{Future, Poll}; +use postgres_protocol::message::frontend; +use state_machine_future::RentToOwn; +use tokio_io::io::{self, ReadExact, WriteAll}; +use tokio_io::{AsyncRead, AsyncWrite}; + +use {ChannelBinding, Error, TlsMode}; + +#[derive(StateMachineFuture)] +pub enum Tls +where + T: TlsMode, + S: AsyncRead + AsyncWrite, +{ + #[state_machine_future(start, transitions(SendingTls, ConnectingTls))] + Start { stream: S, tls_mode: T }, + #[state_machine_future(transitions(ReadingTls))] + SendingTls { + future: WriteAll>, + tls_mode: T, + }, + #[state_machine_future(transitions(ConnectingTls))] + ReadingTls { + future: ReadExact, + tls_mode: T, + }, + #[state_machine_future(transitions(Ready))] + ConnectingTls { future: T::Future }, + #[state_machine_future(ready)] + Ready((T::Stream, ChannelBinding)), + #[state_machine_future(error)] + Failed(Error), +} + +impl PollTls for Tls +where + T: TlsMode, + S: AsyncRead + AsyncWrite, +{ + fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { + let state = state.take(); + + if state.tls_mode.request_tls() { + let mut buf = vec![]; + frontend::ssl_request(&mut buf); + + transition!(SendingTls { + future: io::write_all(state.stream, buf), + tls_mode: state.tls_mode, + }) + } else { + transition!(ConnectingTls { + future: state.tls_mode.handle_tls(false, state.stream), + }) + } + } + + fn poll_sending_tls<'a>( + state: &'a mut RentToOwn<'a, SendingTls>, + ) -> Poll, Error> { + let (stream, _) = try_ready!(state.future.poll().map_err(Error::io)); + let state = state.take(); + transition!(ReadingTls { + future: io::read_exact(stream, [0]), + tls_mode: state.tls_mode, + }) + } + + fn poll_reading_tls<'a>( + state: &'a mut RentToOwn<'a, ReadingTls>, + ) -> Poll, Error> { + let (stream, buf) = try_ready!(state.future.poll().map_err(Error::io)); + let state = state.take(); + + let use_tls = buf[0] == b'S'; + transition!(ConnectingTls { + future: state.tls_mode.handle_tls(use_tls, stream) + }) + } + + fn poll_connecting_tls<'a>( + state: &'a mut RentToOwn<'a, ConnectingTls>, + ) -> Poll, Error> { + let t = try_ready!(state.future.poll().map_err(|e| Error::tls(e.into()))); + transition!(Ready(t)) + } +} + +impl TlsFuture +where + T: TlsMode, + S: AsyncRead + AsyncWrite, +{ + pub fn new(stream: S, tls_mode: T) -> TlsFuture { + Tls::start(stream, tls_mode) + } +} diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 95766118e..2ba1a7dc8 100644 --- a/tokio-postgres/src/tls.rs +++ 
b/tokio-postgres/src/tls.rs @@ -1,83 +1,274 @@ use bytes::{Buf, BufMut}; -use futures::{Future, Poll}; +use futures::future::{self, FutureResult}; +use futures::{Async, Future, Poll}; use std::error::Error; +use std::fmt; use std::io::{self, Read, Write}; use tokio_io::{AsyncRead, AsyncWrite}; +use void::Void; -use proto; +pub struct ChannelBinding { + pub(crate) tls_server_end_point: Option>, + pub(crate) tls_unique: Option>, +} + +impl ChannelBinding { + pub fn new() -> ChannelBinding { + ChannelBinding { + tls_server_end_point: None, + tls_unique: None, + } + } + + pub fn tls_server_end_point(mut self, tls_server_end_point: Vec) -> ChannelBinding { + self.tls_server_end_point = Some(tls_server_end_point); + self + } + + pub fn tls_unique(mut self, tls_unique: Vec) -> ChannelBinding { + self.tls_unique = Some(tls_unique); + self + } +} + +pub trait TlsMode { + type Stream: AsyncRead + AsyncWrite; + type Error: Into>; + type Future: Future; + + fn request_tls(&self) -> bool; + + fn handle_tls(self, use_tls: bool, stream: S) -> Self::Future; +} + +pub trait TlsConnect { + type Stream: AsyncRead + AsyncWrite; + type Error: Into>; + type Future: Future; + + fn connect(self, stream: S) -> Self::Future; +} + +#[derive(Debug, Copy, Clone)] +pub struct NoTls; + +impl TlsMode for NoTls +where + S: AsyncRead + AsyncWrite, +{ + type Stream = S; + type Error = Void; + type Future = FutureResult<(S, ChannelBinding), Void>; + + fn request_tls(&self) -> bool { + false + } + + fn handle_tls(self, use_tls: bool, stream: S) -> FutureResult<(S, ChannelBinding), Void> { + debug_assert!(!use_tls); + + future::ok((stream, ChannelBinding::new())) + } +} + +#[derive(Debug, Copy, Clone)] +pub struct PreferTls(pub T); + +impl TlsMode for PreferTls +where + T: TlsConnect, + S: AsyncRead + AsyncWrite, +{ + type Stream = MaybeTlsStream; + type Error = T::Error; + type Future = PreferTlsFuture; + + fn request_tls(&self) -> bool { + true + } + + fn handle_tls(self, use_tls: bool, stream: S) -> PreferTlsFuture { + let f = if use_tls { + PreferTlsFutureInner::Tls(self.0.connect(stream)) + } else { + PreferTlsFutureInner::Raw(Some(stream)) + }; + + PreferTlsFuture(f) + } +} + +enum PreferTlsFutureInner { + Tls(F), + Raw(Option), +} -pub struct Socket(pub(crate) proto::Socket); +pub struct PreferTlsFuture(PreferTlsFutureInner); + +impl Future for PreferTlsFuture +where + F: Future, +{ + type Item = (MaybeTlsStream, ChannelBinding); + type Error = F::Error; + + fn poll(&mut self) -> Poll<(MaybeTlsStream, ChannelBinding), F::Error> { + match &mut self.0 { + PreferTlsFutureInner::Tls(f) => { + let (stream, channel_binding) = try_ready!(f.poll()); + Ok(Async::Ready((MaybeTlsStream::Tls(stream), channel_binding))) + } + PreferTlsFutureInner::Raw(s) => Ok(Async::Ready(( + MaybeTlsStream::Raw(s.take().expect("future polled after completion")), + ChannelBinding::new(), + ))), + } + } +} + +pub enum MaybeTlsStream { + Tls(T), + Raw(U), +} -impl Read for Socket { +impl Read for MaybeTlsStream +where + T: Read, + U: Read, +{ fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.0.read(buf) + match self { + MaybeTlsStream::Tls(s) => s.read(buf), + MaybeTlsStream::Raw(s) => s.read(buf), + } } } -impl AsyncRead for Socket { +impl AsyncRead for MaybeTlsStream +where + T: AsyncRead, + U: AsyncRead, +{ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - self.0.prepare_uninitialized_buffer(buf) + match self { + MaybeTlsStream::Tls(s) => s.prepare_uninitialized_buffer(buf), + MaybeTlsStream::Raw(s) => 
s.prepare_uninitialized_buffer(buf), + } } fn read_buf(&mut self, buf: &mut B) -> Poll where B: BufMut, { - self.0.read_buf(buf) + match self { + MaybeTlsStream::Tls(s) => s.read_buf(buf), + MaybeTlsStream::Raw(s) => s.read_buf(buf), + } } } -impl Write for Socket { +impl Write for MaybeTlsStream +where + T: Write, + U: Write, +{ fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.write(buf) + match self { + MaybeTlsStream::Tls(s) => s.write(buf), + MaybeTlsStream::Raw(s) => s.write(buf), + } } fn flush(&mut self) -> io::Result<()> { - self.0.flush() + match self { + MaybeTlsStream::Tls(s) => s.flush(), + MaybeTlsStream::Raw(s) => s.flush(), + } } } -impl AsyncWrite for Socket { +impl AsyncWrite for MaybeTlsStream +where + T: AsyncWrite, + U: AsyncWrite, +{ fn shutdown(&mut self) -> Poll<(), io::Error> { - self.0.shutdown() + match self { + MaybeTlsStream::Tls(s) => s.shutdown(), + MaybeTlsStream::Raw(s) => s.shutdown(), + } } fn write_buf(&mut self, buf: &mut B) -> Poll where B: Buf, { - self.0.write_buf(buf) + match self { + MaybeTlsStream::Tls(s) => s.write_buf(buf), + MaybeTlsStream::Raw(s) => s.write_buf(buf), + } } } -pub trait TlsConnect: Sync + Send { - fn connect( - &self, - domain: &str, - socket: Socket, - ) -> Box, Error = Box> + Sync + Send>; -} +#[derive(Debug, Copy, Clone)] +pub struct RequireTls(pub T); + +impl TlsMode for RequireTls +where + T: TlsConnect, +{ + type Stream = T::Stream; + type Error = Box; + type Future = RequireTlsFuture; -pub trait TlsStream: 'static + Sync + Send + AsyncRead + AsyncWrite { - /// Returns the data associated with the `tls-unique` channel binding type as described in - /// [RFC 5929], if supported. - /// - /// An implementation only needs to support at most one of this or `tls_server_end_point`. - /// - /// [RFC 5929]: https://tools.ietf.org/html/rfc5929 - fn tls_unique(&self) -> Option> { - None + fn request_tls(&self) -> bool { + true } - /// Returns the data associated with the `tls-server-end-point` channel binding type as - /// described in [RFC 5929], if supported. - /// - /// An implementation only needs to support at most one of this or `tls_unique`. - /// - /// [RFC 5929]: https://tools.ietf.org/html/rfc5929 - fn tls_server_end_point(&self) -> Option> { - None + fn handle_tls(self, use_tls: bool, stream: S) -> RequireTlsFuture { + let f = if use_tls { + Ok(self.0.connect(stream)) + } else { + Err(TlsUnsupportedError(()).into()) + }; + + RequireTlsFuture { f: Some(f) } + } +} + +#[derive(Debug)] +pub struct TlsUnsupportedError(()); + +impl fmt::Display for TlsUnsupportedError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("TLS was required but not supported by the server") } } -impl TlsStream for proto::Socket {} +impl Error for TlsUnsupportedError {} + +pub struct RequireTlsFuture { + f: Option>>, +} + +impl Future for RequireTlsFuture +where + T: Future, + T::Error: Into>, +{ + type Item = T::Item; + type Error = Box; + + fn poll(&mut self) -> Poll> { + match self.f.take().expect("future polled after completion") { + Ok(mut f) => match f.poll().map_err(Into::into)? 
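// Editorial sketch, not part of this patch: a minimal `TlsConnect` implementation under the
// trait introduced in this new tls.rs module. The generic parameter and associated-type shape
// are partly reconstructed (a future resolving to `(Self::Stream, ChannelBinding)`, matching
// what `PreferTlsFuture` unpacks), and `PassthroughTls` is a hypothetical name. It relies on
// the imports already at the top of this module (`future`, `FutureResult`, `Void`, `AsyncRead`,
// `AsyncWrite`). A real connector would perform a TLS handshake and report channel-binding
// data via `ChannelBinding::new().tls_server_end_point(..)` or `.tls_unique(..)`.
struct PassthroughTls;

impl<S> TlsConnect<S> for PassthroughTls
where
    S: AsyncRead + AsyncWrite,
{
    type Stream = S;
    type Error = Void;
    type Future = FutureResult<(S, ChannelBinding), Void>;

    fn connect(self, stream: S) -> Self::Future {
        // No handshake here: hand the raw stream back with empty channel-binding data.
        future::ok((stream, ChannelBinding::new()))
    }
}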
{ + Async::Ready(r) => Ok(Async::Ready(r)), + Async::NotReady => { + self.f = Some(Ok(f)); + Ok(Async::NotReady) + } + }, + Err(e) => Err(e), + } + } +} diff --git a/tokio-postgres/tests/test.rs b/tokio-postgres/tests/test.rs index 015a6b8e9..cda4a6e75 100644 --- a/tokio-postgres/tests/test.rs +++ b/tokio-postgres/tests/test.rs @@ -12,18 +12,28 @@ use futures::stream; use futures::sync::mpsc; use std::error::Error; use std::time::{Duration, Instant}; +use tokio::net::TcpStream; use tokio::prelude::*; use tokio::runtime::current_thread::Runtime; use tokio::timer::Delay; use tokio_postgres::error::SqlState; use tokio_postgres::types::{Kind, Type}; -use tokio_postgres::{AsyncMessage, TlsMode}; +use tokio_postgres::{AsyncMessage, Client, Connection, NoTls}; + +fn connect( + builder: &tokio_postgres::Builder, +) -> impl Future), Error = tokio_postgres::Error> { + let builder = builder.clone(); + TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + .map_err(|e| panic!("{}", e)) + .and_then(move |s| builder.connect(s, NoTls)) +} -fn smoke_test(url: &str) { +fn smoke_test(builder: &tokio_postgres::Builder) { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect(url.parse().unwrap(), TlsMode::None); + let handshake = connect(builder); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -46,9 +56,10 @@ fn plain_password_missing() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://pass_user@localhost:5433".parse().unwrap(), - TlsMode::None, + let handshake = connect( + tokio_postgres::Builder::new() + .user("pass_user") + .database("postgres"), ); runtime.block_on(handshake).err().unwrap(); } @@ -58,9 +69,11 @@ fn plain_password_wrong() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://pass_user:foo@localhost:5433".parse().unwrap(), - TlsMode::None, + let handshake = connect( + tokio_postgres::Builder::new() + .user("pass_user") + .password("foo") + .database("postgres"), ); match runtime.block_on(handshake) { Ok(_) => panic!("unexpected success"), @@ -71,7 +84,12 @@ fn plain_password_wrong() { #[test] fn plain_password_ok() { - smoke_test("postgres://pass_user:password@localhost:5433/postgres"); + smoke_test( + tokio_postgres::Builder::new() + .user("pass_user") + .password("password") + .database("postgres"), + ); } #[test] @@ -79,9 +97,10 @@ fn md5_password_missing() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://md5_user@localhost:5433".parse().unwrap(), - TlsMode::None, + let handshake = connect( + tokio_postgres::Builder::new() + .user("md5_user") + .database("postgres"), ); runtime.block_on(handshake).err().unwrap(); } @@ -91,9 +110,11 @@ fn md5_password_wrong() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://md5_user:foo@localhost:5433".parse().unwrap(), - TlsMode::None, + let handshake = connect( + tokio_postgres::Builder::new() + .user("md5_user") + .password("foo") + .database("postgres"), ); match runtime.block_on(handshake) { Ok(_) => panic!("unexpected success"), @@ -104,7 +125,12 @@ fn md5_password_wrong() { #[test] fn md5_password_ok() { - 
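// Editorial sketch, not part of this patch: how the builder-based flow exercised by these
// tests fits together with the new TLS modes. `NoTls`, `Builder`, `TcpStream`, and `Runtime`
// are the same items the test module imports; `PreferTls(connector)` / `RequireTls(connector)`
// would wrap any `TlsConnect` implementation (hypothetical here). Address and credentials
// mirror the test setup.
fn builder_connect_example() {
    let mut runtime = Runtime::new().unwrap();

    // Mirrors the `connect` helper above: clone the builder so the future is self-contained.
    let builder = tokio_postgres::Builder::new()
        .user("postgres")
        .database("postgres")
        .clone();

    let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap())
        .map_err(|e| panic!("{}", e))
        // Swap `NoTls` for `PreferTls(connector)` / `RequireTls(connector)` to negotiate TLS.
        .and_then(move |stream| builder.connect(stream, NoTls));

    let (_client, connection) = runtime.block_on(handshake).unwrap();
    let connection = connection.map_err(|e| panic!("{}", e));
    runtime.handle().spawn(connection).unwrap();
}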
smoke_test("postgres://md5_user:password@localhost:5433/postgres"); + smoke_test( + tokio_postgres::Builder::new() + .user("md5_user") + .password("password") + .database("postgres"), + ); } #[test] @@ -112,9 +138,10 @@ fn scram_password_missing() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://scram_user@localhost:5433".parse().unwrap(), - TlsMode::None, + let handshake = connect( + tokio_postgres::Builder::new() + .user("scram_user") + .database("postgres"), ); runtime.block_on(handshake).err().unwrap(); } @@ -124,9 +151,11 @@ fn scram_password_wrong() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://scram_user:foo@localhost:5433".parse().unwrap(), - TlsMode::None, + let handshake = connect( + tokio_postgres::Builder::new() + .user("scram_user") + .password("foo") + .database("postgres"), ); match runtime.block_on(handshake) { Ok(_) => panic!("unexpected success"), @@ -137,7 +166,12 @@ fn scram_password_wrong() { #[test] fn scram_password_ok() { - smoke_test("postgres://scram_user:password@localhost:5433/postgres"); + smoke_test( + tokio_postgres::Builder::new() + .user("scram_user") + .password("password") + .database("postgres"), + ); } #[test] @@ -145,11 +179,9 @@ fn pipelined_prepare() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -167,11 +199,9 @@ fn insert_select() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -203,11 +233,9 @@ fn query_portal() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -246,11 +274,9 @@ fn cancel_query() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let cancel_data = connection.cancel_data(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -265,11 +291,10 @@ 
fn cancel_query() { let cancel = Delay::new(Instant::now() + Duration::from_millis(100)) .then(|r| { r.unwrap(); - tokio_postgres::cancel_query( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - cancel_data, - ) + TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + }).then(|r| { + let s = r.unwrap(); + tokio_postgres::cancel_query(s, NoTls, cancel_data) }).then(|r| { r.unwrap(); Ok::<(), ()>(()) @@ -283,11 +308,9 @@ fn custom_enum() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -320,11 +343,9 @@ fn custom_domain() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -346,11 +367,9 @@ fn custom_array() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -373,11 +392,9 @@ fn custom_composite() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -413,11 +430,9 @@ fn custom_range() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -442,11 +457,9 @@ fn custom_simple() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let (mut client, connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let 
connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -463,11 +476,9 @@ fn notifications() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - ); - let (mut client, mut connection) = runtime.block_on(handshake).unwrap(); + let (mut client, mut connection) = runtime + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let (tx, rx) = mpsc::unbounded(); let connection = future::poll_fn(move || { @@ -512,10 +523,8 @@ fn transaction_commit() { let mut runtime = Runtime::new().unwrap(); let (mut client, connection) = runtime - .block_on(tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - )).unwrap(); + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -547,10 +556,8 @@ fn transaction_abort() { let mut runtime = Runtime::new().unwrap(); let (mut client, connection) = runtime - .block_on(tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - )).unwrap(); + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -584,10 +591,8 @@ fn copy_in() { let mut runtime = Runtime::new().unwrap(); let (mut client, connection) = runtime - .block_on(tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - )).unwrap(); + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -628,10 +633,8 @@ fn copy_in_error() { let mut runtime = Runtime::new().unwrap(); let (mut client, connection) = runtime - .block_on(tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - )).unwrap(); + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -668,10 +671,8 @@ fn copy_out() { let mut runtime = Runtime::new().unwrap(); let (mut client, connection) = runtime - .block_on(tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - )).unwrap(); + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); From a0ba0ce2149c94dd440e92e6ad5cc1a56c82fafb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 28 Nov 2018 19:31:39 -0800 Subject: [PATCH 014/819] Fix geo test --- postgres/tests/types/geo.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/postgres/tests/types/geo.rs b/postgres/tests/types/geo.rs index bcde561fc..43a95debe 100644 --- a/postgres/tests/types/geo.rs +++ b/postgres/tests/types/geo.rs @@ -22,8 +22,11 @@ fn test_box_params() { &[ ( Some(Rect { - min: Coordinate { x: -3.14, y: 1.618, }, - max: Coordinate { x: 160.0, y: 69701.5615, }, + min: Coordinate { x: -3.14, y: 1.618 }, + max: Coordinate { + x: 160.0, + y: 69701.5615, + }, }), "BOX(POINT(160.0, 69701.5615), POINT(-3.14, 1.618))", ), @@ 
-35,9 +38,12 @@ fn test_box_params() { #[test] fn test_path_params() { let points = vec![ - Point::new(0.0, 0.0), - Point::new(-3.14, 1.618), - Point::new(160.0, 69701.5615), + Coordinate { x: 0., y: 0. }, + Coordinate { x: -3.14, y: 1.618 }, + Coordinate { + x: 160.0, + y: 69701.5615, + }, ]; test_type( "PATH", From 52dd0b6780a2d09f29556ebb5f0a01e9711fe7b9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 28 Nov 2018 19:32:29 -0800 Subject: [PATCH 015/819] rustfmt --- postgres-protocol/src/lib.rs | 9 +- postgres-protocol/src/message/frontend.rs | 53 +++++-- postgres-shared/src/error/sqlstate.rs | 45 ++++-- postgres-shared/src/lib.rs | 4 +- postgres-shared/src/params/url.rs | 93 +++++++---- postgres-shared/src/rows.rs | 6 +- postgres-shared/src/types/chrono.rs | 5 +- postgres-shared/src/types/geo.rs | 15 +- postgres-shared/src/types/mod.rs | 6 +- postgres-shared/src/types/type_gen.rs | 4 +- postgres/src/macros.rs | 8 +- postgres/src/notification.rs | 42 +++-- postgres/src/params.rs | 2 +- postgres/src/priv_io.rs | 6 +- postgres/src/stmt.rs | 180 ++++++++-------------- postgres/src/text_rows.rs | 23 ++- postgres/src/transaction.rs | 13 +- postgres/tests/test.rs | 4 +- postgres/tests/types/chrono.rs | 6 +- postgres/tests/types/mod.rs | 6 +- postgres/tests/types/uuid.rs | 4 +- tokio-postgres/src/error/sqlstate.rs | 45 ++++-- 22 files changed, 306 insertions(+), 273 deletions(-) diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index f49165ff3..da06a4c36 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -9,7 +9,7 @@ //! //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. -#![doc(html_root_url="https://docs.rs/postgres-protocol/0.3")] +#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.3")] #![warn(missing_docs)] extern crate base64; extern crate byteorder; @@ -68,13 +68,16 @@ macro_rules! from_usize { #[inline] fn from_usize(x: usize) -> io::Result<$t> { if x > <$t>::max_value() as usize { - Err(io::Error::new(io::ErrorKind::InvalidInput, "value too large to transmit")) + Err(io::Error::new( + io::ErrorKind::InvalidInput, + "value too large to transmit", + )) } else { Ok(x as $t) } } } - } + }; } from_usize!(i16); diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index a340df0ce..a0c20a836 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -1,12 +1,12 @@ //! Frontend message serialization. 
#![allow(missing_docs)] -use byteorder::{WriteBytesExt, BigEndian, ByteOrder}; +use byteorder::{BigEndian, ByteOrder, WriteBytesExt}; use std::error::Error; use std::io; use std::marker; -use {Oid, FromUsize, IsNull, write_nullable}; +use {write_nullable, FromUsize, IsNull, Oid}; pub enum Message<'a> { Bind { @@ -16,24 +16,51 @@ pub enum Message<'a> { values: &'a [Option>], result_formats: &'a [i16], }, - CancelRequest { process_id: i32, secret_key: i32 }, - Close { variant: u8, name: &'a str }, - CopyData { data: &'a [u8] }, + CancelRequest { + process_id: i32, + secret_key: i32, + }, + Close { + variant: u8, + name: &'a str, + }, + CopyData { + data: &'a [u8], + }, CopyDone, - CopyFail { message: &'a str }, - Describe { variant: u8, name: &'a str }, - Execute { portal: &'a str, max_rows: i32 }, + CopyFail { + message: &'a str, + }, + Describe { + variant: u8, + name: &'a str, + }, + Execute { + portal: &'a str, + max_rows: i32, + }, Parse { name: &'a str, query: &'a str, param_types: &'a [Oid], }, - PasswordMessage { password: &'a str }, - Query { query: &'a str }, - SaslInitialResponse { mechanism: &'a str, data: &'a [u8] }, - SaslResponse { data: &'a [u8] }, + PasswordMessage { + password: &'a str, + }, + Query { + query: &'a str, + }, + SaslInitialResponse { + mechanism: &'a str, + data: &'a [u8], + }, + SaslResponse { + data: &'a [u8], + }, SslRequest, - StartupMessage { parameters: &'a [(String, String)] }, + StartupMessage { + parameters: &'a [(String, String)], + }, Sync, Terminate, #[doc(hidden)] diff --git a/postgres-shared/src/error/sqlstate.rs b/postgres-shared/src/error/sqlstate.rs index c8e3ec2eb..7dddfc7e2 100644 --- a/postgres-shared/src/error/sqlstate.rs +++ b/postgres-shared/src/error/sqlstate.rs @@ -33,7 +33,8 @@ impl SqlState { pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Cow::Borrowed("01008")); /// 01003 - pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = SqlState(Cow::Borrowed("01003")); + pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = + SqlState(Cow::Borrowed("01003")); /// 01007 pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Cow::Borrowed("01007")); @@ -51,7 +52,8 @@ impl SqlState { pub const NO_DATA: SqlState = SqlState(Cow::Borrowed("02000")); /// 02001 - pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Cow::Borrowed("02001")); + pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = + SqlState(Cow::Borrowed("02001")); /// 03000 pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Cow::Borrowed("03000")); @@ -66,10 +68,12 @@ impl SqlState { pub const CONNECTION_FAILURE: SqlState = SqlState(Cow::Borrowed("08006")); /// 08001 - pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = SqlState(Cow::Borrowed("08001")); + pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = + SqlState(Cow::Borrowed("08001")); /// 08004 - pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = SqlState(Cow::Borrowed("08004")); + pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = + SqlState(Cow::Borrowed("08004")); /// 08007 pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("08007")); @@ -105,7 +109,8 @@ impl SqlState { pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0Z000")); /// 0Z002 - pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = SqlState(Cow::Borrowed("0Z002")); + pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState 
= + SqlState(Cow::Borrowed("0Z002")); /// 20000 pub const CASE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("20000")); @@ -159,7 +164,8 @@ impl SqlState { pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201F")); /// 2201G - pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201G")); + pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = + SqlState(Cow::Borrowed("2201G")); /// 22018 pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Cow::Borrowed("22018")); @@ -192,7 +198,8 @@ impl SqlState { pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201W")); /// 2201X - pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201X")); + pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = + SqlState(Cow::Borrowed("2201X")); /// 2202H pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = SqlState(Cow::Borrowed("2202H")); @@ -303,22 +310,27 @@ impl SqlState { pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Cow::Borrowed("25002")); /// 25008 - pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = SqlState(Cow::Borrowed("25008")); + pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = + SqlState(Cow::Borrowed("25008")); /// 25003 - pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25003")); + pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25003")); /// 25004 - pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25004")); + pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25004")); /// 25005 - pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25005")); + pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25005")); /// 25006 pub const READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25006")); /// 25007 - pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = SqlState(Cow::Borrowed("25007")); + pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = + SqlState(Cow::Borrowed("25007")); /// 25P01 pub const NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P01")); @@ -345,7 +357,8 @@ impl SqlState { pub const INVALID_PASSWORD: SqlState = SqlState(Cow::Borrowed("28P01")); /// 2B000 - pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2B000")); + pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = + SqlState(Cow::Borrowed("2B000")); /// 2BP01 pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2BP01")); @@ -357,7 +370,8 @@ impl SqlState { pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("2F000")); /// 2F005 - pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = SqlState(Cow::Borrowed("2F005")); + pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = + SqlState(Cow::Borrowed("2F005")); /// 2F002 pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F002")); @@ -699,7 +713,8 @@ impl SqlState { pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Cow::Borrowed("HV00D")); /// HV090 - pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = SqlState(Cow::Borrowed("HV090")); + pub const 
FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = + SqlState(Cow::Borrowed("HV090")); /// HV00A pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Cow::Borrowed("HV00A")); diff --git a/postgres-shared/src/lib.rs b/postgres-shared/src/lib.rs index 521779b34..84506f7db 100644 --- a/postgres-shared/src/lib.rs +++ b/postgres-shared/src/lib.rs @@ -1,15 +1,15 @@ #![allow(unknown_lints)] // for clippy -extern crate hex; extern crate fallible_iterator; +extern crate hex; extern crate phf; extern crate postgres_protocol; pub mod error; pub mod params; -pub mod types; pub mod rows; pub mod stmt; +pub mod types; /// Contains information necessary to cancel queries for a session. #[derive(Copy, Clone, Debug)] diff --git a/postgres-shared/src/params/url.rs b/postgres-shared/src/params/url.rs index 549beebfb..e965f2152 100644 --- a/postgres-shared/src/params/url.rs +++ b/postgres-shared/src/params/url.rs @@ -7,8 +7,8 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::str::FromStr; use hex::FromHex; +use std::str::FromStr; pub struct Url { pub scheme: String, @@ -128,31 +128,29 @@ fn decode_inner(c: &str, full_url: bool) -> DecodeResult { let bytes = match (iter.next(), iter.next()) { (Some(one), Some(two)) => [one, two], _ => { - return Err( - "Malformed input: found '%' without two \ - trailing bytes" - .to_owned(), - ) + return Err("Malformed input: found '%' without two \ + trailing bytes" + .to_owned()) } }; let bytes_from_hex = match Vec::::from_hex(&bytes) { Ok(b) => b, _ => { - return Err( - "Malformed input: found '%' followed by \ + return Err("Malformed input: found '%' followed by \ invalid hex values. Character '%' must \ escaped." - .to_owned(), - ) + .to_owned()) } }; // Only decode some characters if full_url: match bytes_from_hex[0] as char { // gen-delims: - ':' | '/' | '?' | '#' | '[' | ']' | '@' | '!' | '$' | '&' | '"' | - '(' | ')' | '*' | '+' | ',' | ';' | '=' if full_url => { + ':' | '/' | '?' | '#' | '[' | ']' | '@' | '!' 
| '$' | '&' | '"' + | '(' | ')' | '*' | '+' | ',' | ';' | '=' + if full_url => + { out.push('%'); out.push(bytes[0] as char); out.push(bytes[1] as char); @@ -221,18 +219,18 @@ pub fn get_scheme(rawurl: &str) -> DecodeResult<(&str, &str)> { // returns userinfo, host, port, and unparsed part, or an error fn get_authority(rawurl: &str) -> DecodeResult<(Option, &str, Option, &str)> { enum State { - Start, // starting state + Start, // starting state PassHostPort, // could be in user or port - Ip6Port, // either in ipv6 host or port - Ip6Host, // are in an ipv6 host - InHost, // are in a host - may be ipv6, but don't know yet - InPort, // are in port + Ip6Port, // either in ipv6 host or port + Ip6Host, // are in an ipv6 host + InHost, // are in a host - may be ipv6, but don't know yet + InPort, // are in port } #[derive(Clone, PartialEq)] enum Input { - Digit, // all digits - Hex, // digits and letters a-f + Digit, // all digits + Hex, // digits and letters a-f Unreserved, // all other legal characters } @@ -263,8 +261,23 @@ fn get_authority(rawurl: &str) -> DecodeResult<(Option, &str, Option { // separators, don't change anything } @@ -366,26 +379,42 @@ fn get_authority(rawurl: &str) -> DecodeResult<(Option, &str, Option None, - opt => { - match opt.and_then(|p| FromStr::from_str(p).ok()) { - None => return Err(format!("Failed to parse port: {:?}", port)), - opt => opt, - } - } + opt => match opt.and_then(|p| FromStr::from_str(p).ok()) { + None => return Err(format!("Failed to parse port: {:?}", port)), + opt => opt, + }, }; Ok((userinfo, host, port, rest)) } - // returns the path and unparsed part of url, or an error fn get_path(rawurl: &str, is_authority: bool) -> DecodeResult<(String, &str)> { let len = rawurl.len(); let mut end = len; for (i, c) in rawurl.chars().enumerate() { match c { - 'A'...'Z' | 'a'...'z' | '0'...'9' | '&' | '\'' | '(' | ')' | '.' | '@' | ':' | - '%' | '/' | '+' | '!' | '*' | ',' | ';' | '=' | '_' | '-' | '~' => continue, + 'A'...'Z' + | 'a'...'z' + | '0'...'9' + | '&' + | '\'' + | '(' + | ')' + | '.' + | '@' + | ':' + | '%' + | '/' + | '+' + | '!' + | '*' + | ',' + | ';' + | '=' + | '_' + | '-' + | '~' => continue, '?' 
| '#' => { end = i; break; @@ -395,9 +424,7 @@ fn get_path(rawurl: &str, is_authority: bool) -> DecodeResult<(String, &str)> { } if is_authority && end != 0 && !rawurl.starts_with('/') { - Err( - "Non-empty path must begin with '/' in presence of authority.".to_owned(), - ) + Err("Non-empty path must begin with '/' in presence of authority.".to_owned()) } else { Ok((decode_component(&rawurl[0..end])?, &rawurl[end..len])) } diff --git a/postgres-shared/src/rows.rs b/postgres-shared/src/rows.rs index 48de53601..2b0f8860a 100644 --- a/postgres-shared/src/rows.rs +++ b/postgres-shared/src/rows.rs @@ -59,11 +59,7 @@ where } } -impl<'a, T> RowIndex for &'a T -where - T: ?Sized + Sealed, -{ -} +impl<'a, T> RowIndex for &'a T where T: ?Sized + Sealed {} #[doc(hidden)] pub struct RowData { diff --git a/postgres-shared/src/types/chrono.rs b/postgres-shared/src/types/chrono.rs index 0f305ea1e..2ab7da3b2 100644 --- a/postgres-shared/src/types/chrono.rs +++ b/postgres-shared/src/types/chrono.rs @@ -1,7 +1,8 @@ extern crate chrono; -use self::chrono::{DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, - Utc}; +use self::chrono::{ + DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc, +}; use postgres_protocol::types; use std::error::Error; diff --git a/postgres-shared/src/types/geo.rs b/postgres-shared/src/types/geo.rs index c09b0c566..82f9d317e 100644 --- a/postgres-shared/src/types/geo.rs +++ b/postgres-shared/src/types/geo.rs @@ -30,8 +30,14 @@ impl<'a> FromSql<'a> for Rect { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let rect = types::box_from_sql(raw)?; Ok(Rect { - min: Coordinate { x: rect.lower_left().x(), y: rect.lower_left().y(), }, - max: Coordinate { x: rect.upper_right().x(), y: rect.upper_right().y(), }, + min: Coordinate { + x: rect.lower_left().x(), + y: rect.lower_left().y(), + }, + max: Coordinate { + x: rect.upper_right().x(), + y: rect.upper_right().y(), + }, }) } @@ -51,7 +57,10 @@ impl ToSql for Rect { impl<'a> FromSql<'a> for LineString { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let path = types::path_from_sql(raw)?; - let points = path.points().map(|p| Coordinate { x: p.x(), y: p.y() }).collect()?; + let points = path + .points() + .map(|p| Coordinate { x: p.x(), y: p.y() }) + .collect()?; Ok(LineString(points)) } diff --git a/postgres-shared/src/types/mod.rs b/postgres-shared/src/types/mod.rs index 627720806..9fcfef277 100644 --- a/postgres-shared/src/types/mod.rs +++ b/postgres-shared/src/types/mod.rs @@ -337,11 +337,7 @@ pub trait FromSql<'a>: Sized { /// This is primarily useful for trait bounds on functions. 
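As a rough orientation sketch (not part of any patch in this series) of what the `FromSqlOwned` blanket impl condensed just below provides: any type implementing `FromSql` for every lifetime gets `FromSqlOwned` for free, which is the bound to reach for when a decoded value must outlive the buffer it was read from. The probe function and `main` are purely illustrative.

```rust
extern crate postgres_shared;

use postgres_shared::types::FromSqlOwned;

// Compile-time probe: only types that can be decoded without borrowing the raw
// row buffer satisfy this bound.
fn assert_owned<T: FromSqlOwned>() {}

fn main() {
    // i32 and String implement `FromSql<'a>` for every `'a`, so both pass.
    assert_owned::<i32>();
    assert_owned::<String>();
    // A borrowed decode target such as `&str` would be rejected here, since it
    // only implements `FromSql` for the buffer's own lifetime.
}
```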
pub trait FromSqlOwned: for<'a> FromSql<'a> {} -impl FromSqlOwned for T -where - T: for<'a> FromSql<'a>, -{ -} +impl FromSqlOwned for T where T: for<'a> FromSql<'a> {} impl<'a, T: FromSql<'a>> FromSql<'a> for Option { fn from_sql(ty: &Type, raw: &'a [u8]) -> Result, Box> { diff --git a/postgres-shared/src/types/type_gen.rs b/postgres-shared/src/types/type_gen.rs index 3992112ea..4ce0c5fba 100644 --- a/postgres-shared/src/types/type_gen.rs +++ b/postgres-shared/src/types/type_gen.rs @@ -1,7 +1,7 @@ // Autogenerated file - DO NOT EDIT use std::sync::Arc; -use types::{Type, Oid, Kind}; +use types::{Kind, Oid, Type}; #[derive(PartialEq, Eq, Debug)] pub struct Other { @@ -1834,4 +1834,4 @@ impl Type { /// REGROLE[] pub const REGROLE_ARRAY: Type = Type(Inner::RegroleArray); -} \ No newline at end of file +} diff --git a/postgres/src/macros.rs b/postgres/src/macros.rs index ba6d11ff4..939a0344a 100644 --- a/postgres/src/macros.rs +++ b/postgres/src/macros.rs @@ -19,21 +19,21 @@ macro_rules! check_desync { } macro_rules! bad_response { - ($s:expr) => ({ + ($s:expr) => {{ debug!("Bad response at {}:{}", file!(), line!()); $s.desynchronized = true; return Err(::bad_response().into()); - }) + }}; } #[cfg(feature = "no-logging")] macro_rules! debug { - ($($t:tt)*) => {} + ($($t:tt)*) => {}; } #[cfg(feature = "no-logging")] macro_rules! info { - ($($t:tt)*) => {} + ($($t:tt)*) => {}; } /// Generates a simple implementation of `ToSql::accepts` which accepts the diff --git a/postgres/src/notification.rs b/postgres/src/notification.rs index addbd748e..73eb98626 100644 --- a/postgres/src/notification.rs +++ b/postgres/src/notification.rs @@ -1,17 +1,17 @@ //! Asynchronous notifications. +use error::DbError; use fallible_iterator::{FallibleIterator, IntoFallibleIterator}; +use postgres_protocol::message::backend::{self, ErrorFields}; use std::fmt; use std::time::Duration; -use postgres_protocol::message::backend::{self, ErrorFields}; -use error::DbError; #[doc(inline)] use postgres_shared; pub use postgres_shared::Notification; -use {desynchronized, Result, Connection}; use error::Error; +use {desynchronized, Connection, Result}; /// Notifications from the Postgres backend. 
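For orientation, a hypothetical usage sketch of the notifications API whose iterator impls are reformatted just below (postgres 0.15-era blocking crate). The channel name `my_channel` and the surrounding setup are assumptions; `iter()` is the non-blocking variant backing the `Iter` type shown here, with `blocking_iter()` and `timeout_iter()` behind the other two impls.

```rust
extern crate fallible_iterator;
extern crate postgres;

use fallible_iterator::FallibleIterator;
use postgres::error::Error;
use postgres::Connection;

// Drain whatever notifications have already arrived on the connection.
fn drain_pending(conn: &Connection) -> Result<(), Error> {
    conn.simple_query("LISTEN my_channel")?;

    let notifications = conn.notifications();
    let mut iter = notifications.iter();
    while let Some(n) = iter.next()? {
        println!("pid {} sent {:?} on {}", n.process_id, n.payload, n.channel);
    }
    Ok(())
}
```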
pub struct Notifications<'conn> { @@ -105,13 +105,11 @@ impl<'a> FallibleIterator for Iter<'a> { } match conn.read_message_with_notification_nonblocking() { - Ok(Some(backend::Message::NotificationResponse(body))) => { - Ok(Some(Notification { - process_id: body.process_id(), - channel: body.channel()?.to_owned(), - payload: body.message()?.to_owned(), - })) - } + Ok(Some(backend::Message::NotificationResponse(body))) => Ok(Some(Notification { + process_id: body.process_id(), + channel: body.channel()?.to_owned(), + payload: body.message()?.to_owned(), + })), Ok(Some(backend::Message::ErrorResponse(body))) => Err(err(&mut body.fields())), Ok(None) => Ok(None), Err(err) => Err(err.into()), @@ -145,13 +143,11 @@ impl<'a> FallibleIterator for BlockingIter<'a> { } match conn.read_message_with_notification() { - Ok(backend::Message::NotificationResponse(body)) => { - Ok(Some(Notification { - process_id: body.process_id(), - channel: body.channel()?.to_owned(), - payload: body.message()?.to_owned(), - })) - } + Ok(backend::Message::NotificationResponse(body)) => Ok(Some(Notification { + process_id: body.process_id(), + channel: body.channel()?.to_owned(), + payload: body.message()?.to_owned(), + })), Ok(backend::Message::ErrorResponse(body)) => Err(err(&mut body.fields())), Err(err) => Err(err.into()), _ => unreachable!(), @@ -182,13 +178,11 @@ impl<'a> FallibleIterator for TimeoutIter<'a> { } match conn.read_message_with_notification_timeout(self.timeout) { - Ok(Some(backend::Message::NotificationResponse(body))) => { - Ok(Some(Notification { - process_id: body.process_id(), - channel: body.channel()?.to_owned(), - payload: body.message()?.to_owned(), - })) - } + Ok(Some(backend::Message::NotificationResponse(body))) => Ok(Some(Notification { + process_id: body.process_id(), + channel: body.channel()?.to_owned(), + payload: body.message()?.to_owned(), + })), Ok(Some(backend::Message::ErrorResponse(body))) => Err(err(&mut body.fields())), Ok(None) => Ok(None), Err(err) => Err(err.into()), diff --git a/postgres/src/params.rs b/postgres/src/params.rs index eb521f109..29e512286 100644 --- a/postgres/src/params.rs +++ b/postgres/src/params.rs @@ -1,3 +1,3 @@ //! Connection parameters -pub use postgres_shared::params::{Builder, ConnectParams, User, Host, IntoConnectParams}; +pub use postgres_shared::params::{Builder, ConnectParams, Host, IntoConnectParams, User}; diff --git a/postgres/src/priv_io.rs b/postgres/src/priv_io.rs index b761910bc..c0e3a1318 100644 --- a/postgres/src/priv_io.rs +++ b/postgres/src/priv_io.rs @@ -59,7 +59,8 @@ impl MessageStream { fn read_in(&mut self) -> io::Result<()> { self.in_buf.reserve(1); - match self.stream + match self + .stream .get_mut() .read(unsafe { self.in_buf.bytes_mut() }) { @@ -206,8 +207,7 @@ fn open_socket(params: &ConnectParams) -> Result { io::ErrorKind::InvalidInput, "could not resolve any addresses", ) - }) - .into()) + }).into()) } #[cfg(unix)] Host::Unix(ref path) => { diff --git a/postgres/src/stmt.rs b/postgres/src/stmt.rs index 500da8080..640cbcb28 100644 --- a/postgres/src/stmt.rs +++ b/postgres/src/stmt.rs @@ -1,20 +1,20 @@ //! 
Prepared statements use fallible_iterator::FallibleIterator; +use postgres_protocol::message::{backend, frontend}; +use postgres_shared::rows::RowData; use std::cell::Cell; use std::collections::VecDeque; use std::fmt; use std::io::{self, Read, Write}; use std::sync::Arc; -use postgres_protocol::message::{backend, frontend}; -use postgres_shared::rows::RowData; #[doc(inline)] pub use postgres_shared::stmt::Column; -use types::{Type, ToSql}; -use rows::{Rows, LazyRows}; +use rows::{LazyRows, Rows}; use transaction::Transaction; +use types::{ToSql, Type}; use {bad_response, err, Connection, Result, StatementInfo}; /// A prepared statement. @@ -135,13 +135,7 @@ impl<'conn> Statement<'conn> { pub fn execute(&self, params: &[&ToSql]) -> Result { let mut conn = self.conn.0.borrow_mut(); check_desync!(conn); - conn.raw_execute( - &self.info.name, - "", - 0, - self.param_types(), - params, - )?; + conn.raw_execute(&self.info.name, "", 0, self.param_types(), params)?; let num; loop { @@ -163,9 +157,8 @@ impl<'conn> Statement<'conn> { conn.stream.write_message(|buf| { frontend::copy_fail("COPY queries cannot be directly executed", buf) })?; - conn.stream.write_message( - |buf| Ok::<(), io::Error>(frontend::sync(buf)), - )?; + conn.stream + .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; conn.stream.flush()?; } backend::Message::CopyOutResponse(_) => { @@ -269,7 +262,7 @@ impl<'conn> Statement<'conn> { assert!( self.conn as *const _ == trans.conn() as *const _, "the `Transaction` passed to `lazy_query` must be associated with the same \ - `Connection` as the `Statement`" + `Connection` as the `Statement`" ); let conn = self.conn.0.borrow(); check_desync!(conn); @@ -284,12 +277,8 @@ impl<'conn> Statement<'conn> { let portal_name = format!("{}p{}", self.info.name, id); let mut rows = VecDeque::new(); - let more_rows = self.inner_query( - &portal_name, - row_limit, - params, - |row| rows.push_back(row), - )?; + let more_rows = + self.inner_query(&portal_name, row_limit, params, |row| rows.push_back(row))?; Ok(LazyRows::new( self, rows, @@ -324,36 +313,29 @@ impl<'conn> Statement<'conn> { /// ``` pub fn copy_in(&self, params: &[&ToSql], r: &mut R) -> Result { let mut conn = self.conn.0.borrow_mut(); - conn.raw_execute( - &self.info.name, - "", - 0, - self.param_types(), - params, - )?; + conn.raw_execute(&self.info.name, "", 0, self.param_types(), params)?; let (format, column_formats) = match conn.read_message()? { backend::Message::CopyInResponse(body) => { let format = body.format(); - let column_formats = body.column_formats().map(|f| Format::from_u16(f)).collect()?; + let column_formats = body + .column_formats() + .map(|f| Format::from_u16(f)) + .collect()?; (format, column_formats) } backend::Message::ErrorResponse(body) => { conn.wait_for_ready()?; return Err(err(&mut body.fields())); } - _ => { - loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? { - return Err( - io::Error::new( - io::ErrorKind::InvalidInput, - "called `copy_in` on a non-`COPY FROM STDIN` statement", - ).into(), - ); - } + _ => loop { + if let backend::Message::ReadyForQuery(_) = conn.read_message()? 
{ + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "called `copy_in` on a non-`COPY FROM STDIN` statement", + ).into()); } - } + }, }; let info = CopyInfo { @@ -366,20 +348,16 @@ impl<'conn> Statement<'conn> { match fill_copy_buf(&mut buf, r, &info) { Ok(0) => break, Ok(len) => { - conn.stream.write_message( - |out| frontend::copy_data(&buf[..len], out), - )?; + conn.stream + .write_message(|out| frontend::copy_data(&buf[..len], out))?; } Err(err) => { - conn.stream.write_message( - |buf| frontend::copy_fail("", buf), - )?; - conn.stream.write_message(|buf| { - Ok::<(), io::Error>(frontend::copy_done(buf)) - })?; - conn.stream.write_message( - |buf| Ok::<(), io::Error>(frontend::sync(buf)), - )?; + conn.stream + .write_message(|buf| frontend::copy_fail("", buf))?; + conn.stream + .write_message(|buf| Ok::<(), io::Error>(frontend::copy_done(buf)))?; + conn.stream + .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; conn.stream.flush()?; match conn.read_message()? { backend::Message::ErrorResponse(_) => { @@ -396,12 +374,10 @@ impl<'conn> Statement<'conn> { } } - conn.stream.write_message(|buf| { - Ok::<(), io::Error>(frontend::copy_done(buf)) - })?; - conn.stream.write_message( - |buf| Ok::<(), io::Error>(frontend::sync(buf)), - )?; + conn.stream + .write_message(|buf| Ok::<(), io::Error>(frontend::copy_done(buf)))?; + conn.stream + .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; conn.stream.flush()?; let num = match conn.read_message()? { @@ -444,30 +420,24 @@ impl<'conn> Statement<'conn> { /// ``` pub fn copy_out<'a, W: WriteWithInfo>(&'a self, params: &[&ToSql], w: &mut W) -> Result { let mut conn = self.conn.0.borrow_mut(); - conn.raw_execute( - &self.info.name, - "", - 0, - self.param_types(), - params, - )?; + conn.raw_execute(&self.info.name, "", 0, self.param_types(), params)?; let (format, column_formats) = match conn.read_message()? { backend::Message::CopyOutResponse(body) => { let format = body.format(); - let column_formats = body.column_formats().map(|f| Format::from_u16(f)).collect()?; + let column_formats = body + .column_formats() + .map(|f| Format::from_u16(f)) + .collect()?; (format, column_formats) } backend::Message::CopyInResponse(_) => { - conn.stream.write_message( - |buf| frontend::copy_fail("", buf), - )?; - conn.stream.write_message(|buf| { - Ok::<(), io::Error>(frontend::copy_done(buf)) - })?; - conn.stream.write_message( - |buf| Ok::<(), io::Error>(frontend::sync(buf)), - )?; + conn.stream + .write_message(|buf| frontend::copy_fail("", buf))?; + conn.stream + .write_message(|buf| Ok::<(), io::Error>(frontend::copy_done(buf)))?; + conn.stream + .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; conn.stream.flush()?; match conn.read_message()? { backend::Message::ErrorResponse(_) => { @@ -479,29 +449,23 @@ impl<'conn> Statement<'conn> { } } conn.wait_for_ready()?; - return Err( - io::Error::new( - io::ErrorKind::InvalidInput, - "called `copy_out` on a non-`COPY TO STDOUT` statement", - ).into(), - ); + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "called `copy_out` on a non-`COPY TO STDOUT` statement", + ).into()); } backend::Message::ErrorResponse(body) => { conn.wait_for_ready()?; return Err(err(&mut body.fields())); } - _ => { - loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? 
{ - return Err( - io::Error::new( - io::ErrorKind::InvalidInput, - "called `copy_out` on a non-`COPY TO STDOUT` statement", - ).into(), - ); - } + _ => loop { + if let backend::Message::ReadyForQuery(_) = conn.read_message()? { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "called `copy_out` on a non-`COPY TO STDOUT` statement", + ).into()); } - } + }, }; let info = CopyInfo { @@ -517,15 +481,11 @@ impl<'conn> Statement<'conn> { while !data.is_empty() { match w.write_with_info(data, &info) { Ok(n) => data = &data[n..], - Err(e) => { - loop { - if let backend::Message::ReadyForQuery(_) = - conn.read_message()? - { - return Err(e.into()); - } + Err(e) => loop { + if let backend::Message::ReadyForQuery(_) = conn.read_message()? { + return Err(e.into()); } - } + }, } } } @@ -534,20 +494,16 @@ impl<'conn> Statement<'conn> { count = parse_update_count(body.tag()?); break; } - backend::Message::ErrorResponse(body) => { - loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? { - return Err(err(&mut body.fields())); - } + backend::Message::ErrorResponse(body) => loop { + if let backend::Message::ReadyForQuery(_) = conn.read_message()? { + return Err(err(&mut body.fields())); } - } - _ => { - loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? { - return Err(bad_response().into()); - } + }, + _ => loop { + if let backend::Message::ReadyForQuery(_) = conn.read_message()? { + return Err(bad_response().into()); } - } + }, } } diff --git a/postgres/src/text_rows.rs b/postgres/src/text_rows.rs index fd3562c7b..678e98a57 100644 --- a/postgres/src/text_rows.rs +++ b/postgres/src/text_rows.rs @@ -8,8 +8,8 @@ use std::str; #[doc(inline)] pub use postgres_shared::rows::RowIndex; -use stmt::{Column}; -use {Result, error}; +use stmt::Column; +use {error, Result}; /// The resulting rows of a query. pub struct TextRows { @@ -89,11 +89,9 @@ impl<'a> Iterator for Iter<'a> { type Item = TextRow<'a>; fn next(&mut self) -> Option> { - self.iter.next().map(|row| { - TextRow { - columns: self.columns, - data: row, - } + self.iter.next().map(|row| TextRow { + columns: self.columns, + data: row, }) } @@ -104,11 +102,9 @@ impl<'a> Iterator for Iter<'a> { impl<'a> DoubleEndedIterator for Iter<'a> { fn next_back(&mut self) -> Option> { - self.iter.next_back().map(|row| { - TextRow { - columns: self.columns, - data: row, - } + self.iter.next_back().map(|row| TextRow { + columns: self.columns, + data: row, }) } } @@ -188,7 +184,8 @@ impl<'a> TextRow<'a> { None => return None, }; - self.data.get(idx) + self.data + .get(idx) .map(|s| str::from_utf8(s).map_err(|e| error::conversion(Box::new(e)))) } } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 1dfdc7d3c..b00593a09 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -4,8 +4,8 @@ use std::cell::Cell; use std::fmt; use rows::Rows; -use text_rows::TextRows; use stmt::Statement; +use text_rows::TextRows; use types::ToSql; use {bad_response, Connection, Result}; @@ -226,10 +226,9 @@ impl<'conn> Transaction<'conn> { } /// Like `Connection::batch_execute`. - #[deprecated(since="0.15.3", note="please use `simple_query` instead")] + #[deprecated(since = "0.15.3", note = "please use `simple_query` instead")] pub fn batch_execute(&self, query: &str) -> Result<()> { - self.simple_query(query) - .map(|_| ()) + self.simple_query(query).map(|_| ()) } /// Like `Connection::simple_query`. 
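A hypothetical sketch of the replacement call that the deprecation above points at: `simple_query` pushes the text through the simple-query protocol, so several `;`-separated statements can ride in one call, but no `$n` parameters can be bound (the prepared-statement `query`/`execute` API remains the right tool for that). The schema here is invented for illustration.

```rust
extern crate postgres;

use postgres::error::Error;
use postgres::Connection;

fn setup(conn: &Connection) -> Result<(), Error> {
    // Two statements, one round trip, no bind parameters.
    conn.simple_query(
        "CREATE TEMPORARY TABLE audit (id SERIAL PRIMARY KEY, note TEXT);
         INSERT INTO audit (note) VALUES ('created');",
    )?;
    Ok(())
}
```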
@@ -255,7 +254,8 @@ impl<'conn> Transaction<'conn> { /// Panics if there is an active nested transaction. #[inline] pub fn savepoint<'a, I>(&'a self, name: I) -> Result> - where I: Into + where + I: Into, { self._savepoint(name.into()) } @@ -292,8 +292,7 @@ impl<'conn> Transaction<'conn> { pub fn set_config(&self, config: &Config) -> Result<()> { let mut command = "SET TRANSACTION".to_owned(); config.build_command(&mut command); - self.simple_query(&command) - .map(|_| ()) + self.simple_query(&command).map(|_| ()) } /// Determines if the transaction is currently set to commit or roll back. diff --git a/postgres/tests/test.rs b/postgres/tests/test.rs index 753d9d9ad..3b8ee3844 100644 --- a/postgres/tests/test.rs +++ b/postgres/tests/test.rs @@ -1365,8 +1365,8 @@ fn test_rows_index() { #[test] fn test_type_names() { let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - let stmt = - conn.prepare( + let stmt = conn + .prepare( "SELECT t.oid, t.typname FROM pg_catalog.pg_type t, pg_namespace n WHERE n.oid = t.typnamespace diff --git a/postgres/tests/types/chrono.rs b/postgres/tests/types/chrono.rs index 22bd7eef3..3d5ef64d3 100644 --- a/postgres/tests/types/chrono.rs +++ b/postgres/tests/types/chrono.rs @@ -1,6 +1,6 @@ extern crate chrono; -use self::chrono::{TimeZone, NaiveDate, NaiveTime, NaiveDateTime, DateTime, Utc}; +use self::chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc}; use types::test_type; use postgres::types::{Date, Timestamp}; @@ -9,9 +9,7 @@ use postgres::types::{Date, Timestamp}; fn test_naive_date_time_params() { fn make_check<'a>(time: &'a str) -> (Option, &'a str) { ( - Some( - NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap(), - ), + Some(NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), time, ) } diff --git a/postgres/tests/types/mod.rs b/postgres/tests/types/mod.rs index e95945b36..0a0be5384 100644 --- a/postgres/tests/types/mod.rs +++ b/postgres/tests/types/mod.rs @@ -364,7 +364,8 @@ fn test_slice() { INSERT INTO foo (f) VALUES ('a'), ('b'), ('c'), ('d');", ).unwrap(); - let stmt = conn.prepare("SELECT f FROM foo WHERE id = ANY($1)") + let stmt = conn + .prepare("SELECT f FROM foo WHERE id = ANY($1)") .unwrap(); let result = stmt.query(&[&&[1i32, 3, 4][..]]).unwrap(); assert_eq!( @@ -382,7 +383,8 @@ fn test_slice_wrong_type() { conn.simple_query("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") .unwrap(); - let stmt = conn.prepare("SELECT * FROM foo WHERE id = ANY($1)") + let stmt = conn + .prepare("SELECT * FROM foo WHERE id = ANY($1)") .unwrap(); let err = stmt.query(&[&&["hi"][..]]).unwrap_err(); match err.as_conversion() { diff --git a/postgres/tests/types/uuid.rs b/postgres/tests/types/uuid.rs index fd03ca828..d1b995ad8 100644 --- a/postgres/tests/types/uuid.rs +++ b/postgres/tests/types/uuid.rs @@ -8,9 +8,7 @@ fn test_uuid_params() { "UUID", &[ ( - Some( - uuid::Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap(), - ), + Some(uuid::Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap()), "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", ), (None, "NULL"), diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index c8e3ec2eb..7dddfc7e2 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -33,7 +33,8 @@ impl SqlState { pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Cow::Borrowed("01008")); /// 01003 - pub const 
WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = SqlState(Cow::Borrowed("01003")); + pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = + SqlState(Cow::Borrowed("01003")); /// 01007 pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Cow::Borrowed("01007")); @@ -51,7 +52,8 @@ impl SqlState { pub const NO_DATA: SqlState = SqlState(Cow::Borrowed("02000")); /// 02001 - pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Cow::Borrowed("02001")); + pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = + SqlState(Cow::Borrowed("02001")); /// 03000 pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Cow::Borrowed("03000")); @@ -66,10 +68,12 @@ impl SqlState { pub const CONNECTION_FAILURE: SqlState = SqlState(Cow::Borrowed("08006")); /// 08001 - pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = SqlState(Cow::Borrowed("08001")); + pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = + SqlState(Cow::Borrowed("08001")); /// 08004 - pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = SqlState(Cow::Borrowed("08004")); + pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = + SqlState(Cow::Borrowed("08004")); /// 08007 pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("08007")); @@ -105,7 +109,8 @@ impl SqlState { pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0Z000")); /// 0Z002 - pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = SqlState(Cow::Borrowed("0Z002")); + pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = + SqlState(Cow::Borrowed("0Z002")); /// 20000 pub const CASE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("20000")); @@ -159,7 +164,8 @@ impl SqlState { pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201F")); /// 2201G - pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201G")); + pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = + SqlState(Cow::Borrowed("2201G")); /// 22018 pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Cow::Borrowed("22018")); @@ -192,7 +198,8 @@ impl SqlState { pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201W")); /// 2201X - pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201X")); + pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = + SqlState(Cow::Borrowed("2201X")); /// 2202H pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = SqlState(Cow::Borrowed("2202H")); @@ -303,22 +310,27 @@ impl SqlState { pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Cow::Borrowed("25002")); /// 25008 - pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = SqlState(Cow::Borrowed("25008")); + pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = + SqlState(Cow::Borrowed("25008")); /// 25003 - pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25003")); + pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25003")); /// 25004 - pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25004")); + pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25004")); /// 25005 - pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = 
SqlState(Cow::Borrowed("25005")); + pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25005")); /// 25006 pub const READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25006")); /// 25007 - pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = SqlState(Cow::Borrowed("25007")); + pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = + SqlState(Cow::Borrowed("25007")); /// 25P01 pub const NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P01")); @@ -345,7 +357,8 @@ impl SqlState { pub const INVALID_PASSWORD: SqlState = SqlState(Cow::Borrowed("28P01")); /// 2B000 - pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2B000")); + pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = + SqlState(Cow::Borrowed("2B000")); /// 2BP01 pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2BP01")); @@ -357,7 +370,8 @@ impl SqlState { pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("2F000")); /// 2F005 - pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = SqlState(Cow::Borrowed("2F005")); + pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = + SqlState(Cow::Borrowed("2F005")); /// 2F002 pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F002")); @@ -699,7 +713,8 @@ impl SqlState { pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Cow::Borrowed("HV00D")); /// HV090 - pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = SqlState(Cow::Borrowed("HV090")); + pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = + SqlState(Cow::Borrowed("HV090")); /// HV00A pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Cow::Borrowed("HV00A")); From dead5feeba0eafe8a616d503a8c7cc779af07a75 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 28 Nov 2018 19:36:25 -0800 Subject: [PATCH 016/819] Bump docker image --- .circleci/config.yml | 2 +- docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f0f6b0066..96023d11e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -25,7 +25,7 @@ jobs: - image: rust:1.30.1 environment: RUSTFLAGS: -D warnings - - image: sfackler/rust-postgres-test:4 + - image: sfackler/rust-postgres-test:5 steps: - checkout - *RESTORE_REGISTRY diff --git a/docker-compose.yml b/docker-compose.yml index c149834e2..719bf5934 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,6 @@ version: '2' services: postgres: - image: "sfackler/rust-postgres-test:4" + image: "sfackler/rust-postgres-test:5" ports: - 5433:5433 From a24b927bbbe9eb506fb7c745437f826c0ca0443a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 28 Nov 2018 19:44:07 -0800 Subject: [PATCH 017/819] Fix CopyIn Future bound --- tokio-postgres/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index c8e3e00dd..8a78a958e 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -293,7 +293,8 @@ where impl Future for CopyIn where - S: Stream>, + S: Stream, + S::Item: AsRef<[u8]>, S::Error: Into>, { type Item = u64; From 39e2723ccbbda89661e079a4821ea8c820b0e510 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 28 Nov 2018 20:09:04 -0800 Subject: [PATCH 018/819] Remove tls-unique channel binding This got axed in the stable PostgreSQL 11 stable 
release. --- tokio-postgres-openssl/src/lib.rs | 31 +++++++++++++++++------------ tokio-postgres/src/proto/connect.rs | 4 +--- tokio-postgres/src/tls.rs | 7 ------- 3 files changed, 19 insertions(+), 23 deletions(-) diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index 3da7987c3..f3b72f0aa 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -10,6 +10,8 @@ extern crate futures; extern crate tokio; use futures::{Async, Future, Poll}; +use openssl::hash::MessageDigest; +use openssl::nid::Nid; use openssl::ssl::{ConnectConfiguration, HandshakeError, SslRef}; use std::fmt::Debug; use tokio_io::{AsyncRead, AsyncWrite}; @@ -58,19 +60,22 @@ where fn poll(&mut self) -> Poll<(SslStream, ChannelBinding), HandshakeError> { let stream = try_ready!(self.0.poll()); - let f = if stream.get_ref().ssl().session_reused() { - SslRef::peer_finished - } else { - SslRef::finished - }; - - let len = f(stream.get_ref().ssl(), &mut []); - let mut tls_unique = vec![0; len]; - f(stream.get_ref().ssl(), &mut tls_unique); + let mut channel_binding = ChannelBinding::new(); + if let Some(buf) = tls_server_end_point(stream.get_ref().ssl()) { + channel_binding = channel_binding.tls_server_end_point(buf); + } - Ok(Async::Ready(( - stream, - ChannelBinding::new().tls_unique(tls_unique), - ))) + Ok(Async::Ready((stream, channel_binding))) } } + +fn tls_server_end_point(ssl: &SslRef) -> Option> { + let cert = ssl.peer_certificate()?; + let algo_nid = cert.signature_algorithm().object().nid(); + let signature_algorithms = algo_nid.signature_algorithms()?; + let md = match signature_algorithms.digest { + Nid::MD5 | Nid::SHA1 => MessageDigest::sha256(), + nid => MessageDigest::from_nid(nid)?, + }; + cert.digest(md).ok().map(|b| b.to_vec()) +} diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index 659be30a2..e843b593d 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -156,9 +156,7 @@ where } } - let channel_binding = if let Some(tls_unique) = state.channel_binding.tls_unique { - Some(sasl::ChannelBinding::tls_unique(tls_unique)) - } else if let Some(tls_server_end_point) = + let channel_binding = if let Some(tls_server_end_point) = state.channel_binding.tls_server_end_point { Some(sasl::ChannelBinding::tls_server_end_point( diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 2ba1a7dc8..fc7e82339 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -9,14 +9,12 @@ use void::Void; pub struct ChannelBinding { pub(crate) tls_server_end_point: Option>, - pub(crate) tls_unique: Option>, } impl ChannelBinding { pub fn new() -> ChannelBinding { ChannelBinding { tls_server_end_point: None, - tls_unique: None, } } @@ -24,11 +22,6 @@ impl ChannelBinding { self.tls_server_end_point = Some(tls_server_end_point); self } - - pub fn tls_unique(mut self, tls_unique: Vec) -> ChannelBinding { - self.tls_unique = Some(tls_unique); - self - } } pub trait TlsMode { From a2cac0ef617baf081eedf18b906aec1c14406640 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 28 Nov 2018 20:42:53 -0800 Subject: [PATCH 019/819] Remove tls-unique from blocking postgres crate --- postgres-openssl/src/lib.rs | 22 ++++++++++---------- postgres-protocol/src/authentication/sasl.rs | 10 +-------- postgres/src/lib.rs | 10 ++------- postgres/src/tls.rs | 10 --------- 4 files changed, 14 insertions(+), 38 deletions(-) diff --git a/postgres-openssl/src/lib.rs 
b/postgres-openssl/src/lib.rs index 7f09b4708..8b29d35ae 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -2,7 +2,9 @@ pub extern crate openssl; extern crate postgres; use openssl::error::ErrorStack; -use openssl::ssl::{ConnectConfiguration, SslConnector, SslMethod, SslRef, SslStream}; +use openssl::hash::MessageDigest; +use openssl::nid::Nid; +use openssl::ssl::{ConnectConfiguration, SslConnector, SslMethod, SslStream}; use postgres::tls::{Stream, TlsHandshake, TlsStream}; use std::error::Error; use std::fmt; @@ -85,16 +87,14 @@ impl TlsStream for OpenSslStream { self.0.get_mut() } - fn tls_unique(&self) -> Option> { - let f = if self.0.ssl().session_reused() { - SslRef::peer_finished - } else { - SslRef::finished + fn tls_server_end_point(&self) -> Option> { + let cert = self.0.ssl().peer_certificate()?; + let algo_nid = cert.signature_algorithm().object().nid(); + let signature_algorithms = algo_nid.signature_algorithms()?; + let md = match signature_algorithms.digest { + Nid::MD5 | Nid::SHA1 => MessageDigest::sha256(), + nid => MessageDigest::from_nid(nid)?, }; - - let len = f(self.0.ssl(), &mut []); - let mut buf = vec![0; len]; - f(self.0.ssl(), &mut buf); - Some(buf) + cert.digest(md).ok().map(|b| b.to_vec()) } } diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index dfbb70b26..cd46d67d2 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -59,7 +59,6 @@ fn hi(str: &[u8], salt: &[u8], i: u32) -> GenericArray { enum ChannelBindingInner { Unrequested, Unsupported, - TlsUnique(Vec), TlsServerEndPoint(Vec), } @@ -77,11 +76,6 @@ impl ChannelBinding { ChannelBinding(ChannelBindingInner::Unsupported) } - /// The server requested channel binding and the client will use the `tls-unique` method. - pub fn tls_unique(finished: Vec) -> ChannelBinding { - ChannelBinding(ChannelBindingInner::TlsUnique(finished)) - } - /// The server requested channel binding and the client will use the `tls-server-end-point` /// method. 
pub fn tls_server_end_point(signature: Vec) -> ChannelBinding { @@ -92,7 +86,6 @@ impl ChannelBinding { match self.0 { ChannelBindingInner::Unrequested => "y,,", ChannelBindingInner::Unsupported => "n,,", - ChannelBindingInner::TlsUnique(_) => "p=tls-unique,,", ChannelBindingInner::TlsServerEndPoint(_) => "p=tls-server-end-point,,", } } @@ -100,8 +93,7 @@ impl ChannelBinding { fn cbind_data(&self) -> &[u8] { match self.0 { ChannelBindingInner::Unrequested | ChannelBindingInner::Unsupported => &[], - ChannelBindingInner::TlsUnique(ref buf) - | ChannelBindingInner::TlsServerEndPoint(ref buf) => buf, + ChannelBindingInner::TlsServerEndPoint(ref buf) => buf, } } } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index d6046015a..99600ed67 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -435,14 +435,8 @@ impl InnerConnection { let channel_binding = self .stream .get_ref() - .tls_unique() - .map(ChannelBinding::tls_unique) - .or_else(|| { - self.stream - .get_ref() - .tls_server_end_point() - .map(ChannelBinding::tls_server_end_point) - }); + .tls_server_end_point() + .map(ChannelBinding::tls_server_end_point); let (channel_binding, mechanism) = if has_scram_plus { match channel_binding { diff --git a/postgres/src/tls.rs b/postgres/src/tls.rs index 2c051a61c..8d425285a 100644 --- a/postgres/src/tls.rs +++ b/postgres/src/tls.rs @@ -13,16 +13,6 @@ pub trait TlsStream: fmt::Debug + Read + Write + Send { /// Returns a mutable reference to the underlying `Stream`. fn get_mut(&mut self) -> &mut Stream; - /// Returns the data associated with the `tls-unique` channel binding type as described in - /// [RFC 5929], if supported. - /// - /// An implementation only needs to support one of this or `tls_server_end_point`. - /// - /// [RFC 5929]: https://tools.ietf.org/html/rfc5929 - fn tls_unique(&self) -> Option> { - None - } - /// Returns the data associated with the `tls-server-end-point` channel binding type as /// described in [RFC 5929], if supported. 
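As a standalone illustration (independent of the library code), this is roughly how the gs2 header strings and binding data shown just above combine into the SCRAM `cbind-input` of RFC 5802: header first, then the raw `tls-server-end-point` bytes, with the base64 of the whole buffer carried in the `c=` attribute of the client-final message. The function name is hypothetical.

```rust
// `tls_server_end_point` is the certificate digest computed by a TLS layer such as
// the openssl helper above; `None` models a connection without channel binding.
fn cbind_input(tls_server_end_point: Option<&[u8]>) -> Vec<u8> {
    let (gs2_header, cbind_data): (&[u8], &[u8]) = match tls_server_end_point {
        Some(data) => (b"p=tls-server-end-point,,", data),
        // "n,," advertises no channel-binding support; "y,," ("supported but unused")
        // is omitted here for brevity.
        None => (b"n,,", &[]),
    };

    let mut buf = Vec::with_capacity(gs2_header.len() + cbind_data.len());
    buf.extend_from_slice(gs2_header);
    buf.extend_from_slice(cbind_data);
    buf
}
```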
/// From d9d81b53a3e781c47e8145e6b11c9f27cf2eedab Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 29 Nov 2018 21:30:02 -0800 Subject: [PATCH 020/819] Tweak ChannelBinding API --- tokio-postgres-native-tls/src/lib.rs | 8 ++++---- tokio-postgres-openssl/src/lib.rs | 8 ++++---- tokio-postgres/src/tls.rs | 13 +++++++------ 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index d792ba013..cebfe5560 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -60,11 +60,11 @@ where fn poll(&mut self) -> Poll<(TlsStream, ChannelBinding), native_tls::Error> { let stream = try_ready!(self.0.poll()); - let mut channel_binding = ChannelBinding::new(); - if let Some(buf) = stream.get_ref().tls_server_end_point().unwrap_or(None) { - channel_binding = channel_binding.tls_server_end_point(buf); - } + let channel_binding = match stream.get_ref().tls_server_end_point().unwrap_or(None) { + Some(buf) => ChannelBinding::tls_server_end_point(buf), + None => ChannelBinding::none(), + }; Ok(Async::Ready((stream, channel_binding))) } diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index f3b72f0aa..802ee0ad8 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -60,10 +60,10 @@ where fn poll(&mut self) -> Poll<(SslStream, ChannelBinding), HandshakeError> { let stream = try_ready!(self.0.poll()); - let mut channel_binding = ChannelBinding::new(); - if let Some(buf) = tls_server_end_point(stream.get_ref().ssl()) { - channel_binding = channel_binding.tls_server_end_point(buf); - } + let channel_binding = match tls_server_end_point(stream.get_ref().ssl()) { + Some(buf) => ChannelBinding::tls_server_end_point(buf), + None => ChannelBinding::none(), + }; Ok(Async::Ready((stream, channel_binding))) } diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index fc7e82339..30ea8e16b 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -12,15 +12,16 @@ pub struct ChannelBinding { } impl ChannelBinding { - pub fn new() -> ChannelBinding { + pub fn none() -> ChannelBinding { ChannelBinding { tls_server_end_point: None, } } - pub fn tls_server_end_point(mut self, tls_server_end_point: Vec) -> ChannelBinding { - self.tls_server_end_point = Some(tls_server_end_point); - self + pub fn tls_server_end_point(tls_server_end_point: Vec) -> ChannelBinding { + ChannelBinding { + tls_server_end_point: Some(tls_server_end_point), + } } } @@ -60,7 +61,7 @@ where fn handle_tls(self, use_tls: bool, stream: S) -> FutureResult<(S, ChannelBinding), Void> { debug_assert!(!use_tls); - future::ok((stream, ChannelBinding::new())) + future::ok((stream, ChannelBinding::none())) } } @@ -113,7 +114,7 @@ where } PreferTlsFutureInner::Raw(s) => Ok(Async::Ready(( MaybeTlsStream::Raw(s.take().expect("future polled after completion")), - ChannelBinding::new(), + ChannelBinding::none(), ))), } } From afa8a030480a8f1556453ed1d66686a399dbdd63 Mon Sep 17 00:00:00 2001 From: Davide Angelocola Date: Fri, 30 Nov 2018 18:46:14 +0100 Subject: [PATCH 021/819] fix typo --- postgres/src/lib.rs | 4 ++-- postgres/src/transaction.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 99600ed67..dce024555 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -1268,7 +1268,7 @@ impl Connection { /// Like `prepare`, but allows for the types of query parameters to be 
explicitly specified. /// - /// Postgres will normally infer the types of paramters, but this function offers more control + /// Postgres will normally infer the types of parameters, but this function offers more control /// of that behavior. `None` will cause Postgres to infer the type. The list of types can be /// shorter than the number of parameters in the query; it will act as if padded out with `None` /// values. @@ -1383,7 +1383,7 @@ impl Connection { /// statements, it's not possible to pass a separate parameters list with /// this API. /// - /// In general, the `query` API should be prefered whenever possible. + /// In general, the `query` API should be preferred whenever possible. /// /// # Example /// diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index b00593a09..3769c1a84 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -316,7 +316,7 @@ impl<'conn> Transaction<'conn> { self.finish() } - /// Consumes the transaction, commiting or rolling it back as appropriate. + /// Consumes the transaction, committing or rolling it back as appropriate. /// /// Functionally equivalent to the `Drop` implementation of `Transaction` /// except that it returns any error to the caller. From d58b1815adec1a946f14947ff2c9a11c9bfe17b5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 1 Dec 2018 20:01:46 -0800 Subject: [PATCH 022/819] Fix up new transaction API --- tokio-postgres/src/lib.rs | 17 +---------------- tokio-postgres/tests/test.rs | 20 +++++++++----------- 2 files changed, 10 insertions(+), 27 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 8e7110e6a..d8b24de8a 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -101,22 +101,7 @@ impl Client { CopyOut(self.0.copy_out(&statement.0, params)) } - pub fn transaction(&mut self, future: T) -> Transaction - where - T: Future, - // FIXME error type? - T::Error: From, - { - self.transaction_builder().build(future) - } - - /// Creates a TransactionBuilder, which can later be used to create - /// a Transaction around a future. - /// - /// Use this when Client is moved into the future being built. - /// For example, when executing multiple statements that depend - /// on the previous statement's result. 
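A hypothetical sketch (mirroring the updated tests further below) of how the reworked async API is driven now that `transaction()` hands back the builder: the work to run inside the transaction is built as a future first, then wrapped by `build`, which surrounds it with BEGIN/COMMIT and rolls back if it resolves to an error. The table name and runtime setup are assumptions.

```rust
extern crate tokio;
extern crate tokio_postgres;

use tokio_postgres::{Client, Error};

fn insert_in_transaction(
    runtime: &mut tokio::runtime::Runtime,
    client: &mut Client,
) -> Result<(), Error> {
    // The future is created before the transaction wrapper, exactly as in the tests.
    let work = client.batch_execute("INSERT INTO foo (name) VALUES ('alice')");
    runtime.block_on(client.transaction().build(work))
}
```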
- pub fn transaction_builder(&self) -> TransactionBuilder { + pub fn transaction(&mut self) -> TransactionBuilder { TransactionBuilder(self.0.clone()) } diff --git a/tokio-postgres/tests/test.rs b/tokio-postgres/tests/test.rs index 8e554adbf..6efa2ddce 100644 --- a/tokio-postgres/tests/test.rs +++ b/tokio-postgres/tests/test.rs @@ -537,7 +537,7 @@ fn transaction_commit() { )).unwrap(); let f = client.batch_execute("INSERT INTO foo (name) VALUES ('steven')"); - runtime.block_on(client.transaction(f)).unwrap(); + runtime.block_on(client.transaction().build(f)).unwrap(); let rows = runtime .block_on( @@ -573,7 +573,7 @@ fn transaction_abort() { .batch_execute("INSERT INTO foo (name) VALUES ('steven')") .map_err(|e| Box::new(e) as Box) .and_then(|_| Err::<(), _>(Box::::from(""))); - runtime.block_on(client.transaction(f)).unwrap_err(); + runtime.block_on(client.transaction().build(f)).unwrap_err(); let rows = runtime .block_on( @@ -700,21 +700,19 @@ fn transaction_builder_around_moved_client() { let mut runtime = Runtime::new().unwrap(); let (mut client, connection) = runtime - .block_on(tokio_postgres::connect( - "postgres://postgres@localhost:5433".parse().unwrap(), - TlsMode::None, - )).unwrap(); + .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) + .unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); - let transaction_builder = client.transaction_builder(); + let transaction_builder = client.transaction(); let work = future::lazy(move || { - let execute = - client.batch_execute( - "CREATE TEMPORARY TABLE transaction_foo ( + let execute = client.batch_execute( + "CREATE TEMPORARY TABLE transaction_foo ( id SERIAL, name TEXT - )"); + )", + ); execute.and_then(move |_| { client From 43e65989835c49544fa281a7d95a3e7be64231cb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 1 Dec 2018 20:27:06 -0800 Subject: [PATCH 023/819] Modernize error a bit --- tokio-postgres/src/error/mod.rs | 41 +++++++++++---------------------- 1 file changed, 13 insertions(+), 28 deletions(-) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 642dfa463..bda6d4023 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -2,7 +2,7 @@ use fallible_iterator::FallibleIterator; use postgres_protocol::message::backend::{ErrorFields, ErrorResponseBody}; -use std::error; +use std::error::{self, Error as _Error}; use std::fmt; use std::io; @@ -312,11 +312,7 @@ impl fmt::Display for DbError { } } -impl error::Error for DbError { - fn description(&self) -> &str { - &self.message - } -} +impl error::Error for DbError {} /// Represents the position of an error in a query. 
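For context, a hypothetical sketch of how callers typically consume this error surface once the `source`/`code` accessors (continued just below) are in place: `Error::code()` exposes the SQLSTATE as one of the constants from `sqlstate.rs`, so specific failure classes can be matched without string comparison. The function name is illustrative, and it assumes `SqlState` is re-exported from the `error` module.

```rust
extern crate tokio_postgres;

use tokio_postgres::error::{Error, SqlState};

// True when the server rejected the statement because the transaction is read-only
// (SQLSTATE 25006, one of the constants defined above).
fn is_read_only_violation(err: &Error) -> bool {
    err.code() == Some(&SqlState::READ_ONLY_SQL_TRANSACTION)
}
```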
#[derive(Clone, PartialEq, Eq, Debug)] @@ -369,17 +365,7 @@ impl fmt::Debug for Error { impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(error::Error::description(self))?; - if let Some(ref cause) = self.0.cause { - write!(fmt, ": {}", cause)?; - } - Ok(()) - } -} - -impl error::Error for Error { - fn description(&self) -> &str { - match self.0.kind { + let s = match self.0.kind { Kind::Io => "error communicating with the server", Kind::UnexpectedMessage => "unexpected message from server", Kind::Tls => "error performing TLS handshake", @@ -394,23 +380,22 @@ impl error::Error for Error { Kind::MissingPassword => "password not provided", Kind::UnsupportedAuthentication => "unsupported authentication method requested", Kind::Authentication => "authentication error", + }; + fmt.write_str(s)?; + if let Some(ref cause) = self.0.cause { + write!(fmt, ": {}", cause)?; } + Ok(()) } +} - fn cause(&self) -> Option<&error::Error> { - self.0.cause.as_ref().map(|e| &**e as &error::Error) +impl error::Error for Error { + fn source(&self) -> Option<&(error::Error + 'static)> { + self.0.cause.as_ref().map(|e| &**e as _) } } impl Error { - /// Returns the error's cause. - /// - /// This is the same as `Error::cause` except that it provides extra bounds - /// required to be able to downcast the error. - pub fn cause2(&self) -> Option<&(error::Error + 'static + Sync + Send)> { - self.0.cause.as_ref().map(|e| &**e) - } - /// Consumes the error, returning its cause. pub fn into_cause(self) -> Option> { self.0.cause @@ -421,7 +406,7 @@ impl Error { /// This is a convenience method that downcasts the cause to a `DbError` /// and returns its code. pub fn code(&self) -> Option<&SqlState> { - self.cause2() + self.source() .and_then(|e| e.downcast_ref::()) .map(|e| e.code()) } From db771e8bdf1aad0885ecace75970fe6235410a6e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 2 Dec 2018 21:26:10 -0800 Subject: [PATCH 024/819] Switch copy_in to use Buf --- tokio-postgres/src/lib.rs | 11 +++++++---- tokio-postgres/src/proto/client.rs | 4 +++- tokio-postgres/src/proto/copy_in.rs | 14 ++++++++++---- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index d8b24de8a..022a79b25 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -16,7 +16,7 @@ extern crate log; #[macro_use] extern crate state_machine_future; -use bytes::Bytes; +use bytes::{Bytes, IntoBuf}; use futures::{Async, Future, Poll, Stream}; use postgres_shared::rows::RowIndex; use std::error::Error as StdError; @@ -90,7 +90,8 @@ impl Client { pub fn copy_in(&mut self, statement: &Statement, params: &[&ToSql], stream: S) -> CopyIn where S: Stream, - S::Item: AsRef<[u8]>, + S::Item: IntoBuf, + ::Buf: Send, // FIXME error type? 
S::Error: Into>, { @@ -283,13 +284,15 @@ pub struct Portal(proto::Portal); pub struct CopyIn(proto::CopyInFuture) where S: Stream, - S::Item: AsRef<[u8]>, + S::Item: IntoBuf, + ::Buf: Send, S::Error: Into>; impl Future for CopyIn where S: Stream, - S::Item: AsRef<[u8]>, + S::Item: IntoBuf, + ::Buf: Send, S::Error: Into>, { type Item = u64; diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index b7060128f..60b2f7098 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -1,4 +1,5 @@ use antidote::Mutex; +use bytes::IntoBuf; use futures::sync::mpsc; use futures::{AsyncSink, Sink, Stream}; use postgres_protocol; @@ -163,7 +164,8 @@ impl Client { pub fn copy_in(&self, statement: &Statement, params: &[&ToSql], stream: S) -> CopyInFuture where S: Stream, - S::Item: AsRef<[u8]>, + S::Item: IntoBuf, + ::Buf: Send, S::Error: Into>, { let (mut sender, receiver) = mpsc::channel(0); diff --git a/tokio-postgres/src/proto/copy_in.rs b/tokio-postgres/src/proto/copy_in.rs index 8f22dbca0..85fbae7eb 100644 --- a/tokio-postgres/src/proto/copy_in.rs +++ b/tokio-postgres/src/proto/copy_in.rs @@ -1,3 +1,4 @@ +use bytes::{Buf, IntoBuf}; use futures::sink; use futures::sync::mpsc; use futures::{Async, AsyncSink, Future, Poll, Sink, Stream}; @@ -63,7 +64,8 @@ impl Stream for CopyInReceiver { pub enum CopyIn where S: Stream, - S::Item: AsRef<[u8]>, + S::Item: IntoBuf, + ::Buf: Send, S::Error: Into>, { #[state_machine_future(start, transitions(ReadCopyInResponse))] @@ -103,7 +105,8 @@ where impl PollCopyIn for CopyIn where S: Stream, - S::Item: AsRef<[u8]>, + S::Item: IntoBuf, + ::Buf: Send, S::Error: Into>, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { @@ -151,7 +154,9 @@ where None => match try_ready!(state.stream.poll().map_err(Error::copy_in_stream)) { Some(data) => { let mut buf = vec![]; - frontend::copy_data(data.as_ref(), &mut buf).map_err(Error::encode)?; + // FIXME avoid collect + frontend::copy_data(&data.into_buf().collect::>(), &mut buf) + .map_err(Error::encode)?; CopyMessage::Data(buf) } None => { @@ -213,7 +218,8 @@ where impl CopyInFuture where S: Stream, - S::Item: AsRef<[u8]>, + S::Item: IntoBuf, + ::Buf: Send, S::Error: Into>, { pub fn new( From 39efb1378a687c698bd0042f26a9d70117c4290f Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Wed, 5 Dec 2018 10:59:04 -0800 Subject: [PATCH 025/819] Remove now-gone with-openssl from docs metadata. --- tokio-postgres/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 84a3a9bac..ae1e43eac 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -17,7 +17,6 @@ features = [ "with-geo-0.10", "with-serde_json-1", "with-uuid-0.6", - "with-openssl", ] [badges] From 14571ab0292f5fa11302b39fded69d98ee99b660 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 16:11:03 -0800 Subject: [PATCH 026/819] Remove synchronous crate It will be coming back! It's just going to involve a full rewrite and removing it for now makes some of that restructuring easier. 
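A hypothetical usage sketch of the `copy_in` bound reworked in the patch above: any `futures` 0.1 stream whose items convert into a `Buf` (plain `Vec<u8>` chunks here) can feed the statement. `stmt` is assumed to be a prepared `COPY ... FROM STDIN` statement, and the tab-separated rows are invented sample data.

```rust
extern crate futures;
extern crate tokio;
extern crate tokio_postgres;

use futures::stream;
use tokio_postgres::{Client, Error, Statement};

fn copy_two_rows(
    runtime: &mut tokio::runtime::Runtime,
    client: &mut Client,
    stmt: &Statement,
) -> Result<u64, Error> {
    // `Vec<u8>: IntoBuf`, so this satisfies the new `S::Item: IntoBuf` bound.
    let rows = stream::iter_ok::<_, Error>(vec![
        b"1\talice\n".to_vec(),
        b"2\tbob\n".to_vec(),
    ]);
    // Resolves to the number of rows copied.
    runtime.block_on(client.copy_in(stmt, &[], rows))
}
```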
--- .circleci/config.yml | 1 - Cargo.toml | 3 - postgres-native-tls/Cargo.toml | 9 - postgres-native-tls/src/lib.rs | 76 -- postgres-native-tls/src/test.rs | 38 - postgres-openssl/Cargo.toml | 9 - postgres-openssl/src/lib.rs | 100 -- postgres-openssl/src/test.rs | 40 - postgres/Cargo.toml | 66 -- postgres/src/lib.rs | 1560 ---------------------------- postgres/src/macros.rs | 69 -- postgres/src/notification.rs | 203 ---- postgres/src/params.rs | 3 - postgres/src/priv_io.rs | 259 ----- postgres/src/rows.rs | 342 ------ postgres/src/stmt.rs | 605 ----------- postgres/src/text_rows.rs | 191 ---- postgres/src/tls.rs | 50 - postgres/src/transaction.rs | 327 ------ postgres/tests/test.rs | 1481 -------------------------- postgres/tests/types/bit_vec.rs | 30 - postgres/tests/types/chrono.rs | 150 --- postgres/tests/types/eui48.rs | 17 - postgres/tests/types/geo.rs | 58 -- postgres/tests/types/mod.rs | 530 ---------- postgres/tests/types/serde_json.rs | 40 - postgres/tests/types/uuid.rs | 17 - 27 files changed, 6274 deletions(-) delete mode 100644 postgres-native-tls/Cargo.toml delete mode 100644 postgres-native-tls/src/lib.rs delete mode 100644 postgres-native-tls/src/test.rs delete mode 100644 postgres-openssl/Cargo.toml delete mode 100644 postgres-openssl/src/lib.rs delete mode 100644 postgres-openssl/src/test.rs delete mode 100644 postgres/Cargo.toml delete mode 100644 postgres/src/lib.rs delete mode 100644 postgres/src/macros.rs delete mode 100644 postgres/src/notification.rs delete mode 100644 postgres/src/params.rs delete mode 100644 postgres/src/priv_io.rs delete mode 100644 postgres/src/rows.rs delete mode 100644 postgres/src/stmt.rs delete mode 100644 postgres/src/text_rows.rs delete mode 100644 postgres/src/tls.rs delete mode 100644 postgres/src/transaction.rs delete mode 100644 postgres/tests/test.rs delete mode 100644 postgres/tests/types/bit_vec.rs delete mode 100644 postgres/tests/types/chrono.rs delete mode 100644 postgres/tests/types/eui48.rs delete mode 100644 postgres/tests/types/geo.rs delete mode 100644 postgres/tests/types/mod.rs delete mode 100644 postgres/tests/types/serde_json.rs delete mode 100644 postgres/tests/types/uuid.rs diff --git a/.circleci/config.yml b/.circleci/config.yml index 96023d11e..47efb05d2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -34,6 +34,5 @@ jobs: - run: rustc --version > ~/rust-version - *RESTORE_DEPS - run: cargo test --all - - run: cargo test -p postgres --all-features - run: cargo test -p tokio-postgres --all-features - *SAVE_DEPS diff --git a/Cargo.toml b/Cargo.toml index 54c0fbbed..2424197c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,8 @@ [workspace] members = [ "codegen", - "postgres", "postgres-protocol", "postgres-shared", - "postgres-openssl", - "postgres-native-tls", "tokio-postgres", "tokio-postgres-native-tls", "tokio-postgres-openssl", diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml deleted file mode 100644 index ada50ccff..000000000 --- a/postgres-native-tls/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "postgres-native-tls" -version = "0.1.0" -authors = ["Steven Fackler "] - -[dependencies] -native-tls = "0.2" - -postgres = { version = "0.15", path = "../postgres" } diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs deleted file mode 100644 index ea887ed70..000000000 --- a/postgres-native-tls/src/lib.rs +++ /dev/null @@ -1,76 +0,0 @@ -pub extern crate native_tls; -extern crate postgres; - -use native_tls::TlsConnector; -use 
postgres::tls::{Stream, TlsHandshake, TlsStream}; -use std::error::Error; -use std::fmt::{self, Debug}; -use std::io::{self, Read, Write}; - -#[cfg(test)] -mod test; - -pub struct NativeTls { - connector: TlsConnector, -} - -impl Debug for NativeTls { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("NativeTls").finish() - } -} - -impl NativeTls { - pub fn new() -> Result { - let connector = TlsConnector::builder().build()?; - Ok(NativeTls::with_connector(connector)) - } - - pub fn with_connector(connector: TlsConnector) -> NativeTls { - NativeTls { connector } - } -} - -impl TlsHandshake for NativeTls { - fn tls_handshake( - &self, - domain: &str, - stream: Stream, - ) -> Result, Box> { - let stream = self.connector.connect(domain, stream)?; - Ok(Box::new(NativeTlsStream(stream))) - } -} - -#[derive(Debug)] -struct NativeTlsStream(native_tls::TlsStream); - -impl Read for NativeTlsStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.0.read(buf) - } -} - -impl Write for NativeTlsStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.0.flush() - } -} - -impl TlsStream for NativeTlsStream { - fn get_ref(&self) -> &Stream { - self.0.get_ref() - } - - fn get_mut(&mut self) -> &mut Stream { - self.0.get_mut() - } - - fn tls_server_end_point(&self) -> Option> { - self.0.tls_server_end_point().ok().and_then(|o| o) - } -} diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs deleted file mode 100644 index a84798d46..000000000 --- a/postgres-native-tls/src/test.rs +++ /dev/null @@ -1,38 +0,0 @@ -use native_tls::{Certificate, TlsConnector}; -use postgres::{Connection, TlsMode}; - -use NativeTls; - -#[test] -fn connect() { - let cert = include_bytes!("../../test/server.crt"); - let cert = Certificate::from_pem(cert).unwrap(); - - let mut builder = TlsConnector::builder(); - builder.add_root_certificate(cert); - let connector = builder.build().unwrap(); - - let handshake = NativeTls::with_connector(connector); - let conn = Connection::connect( - "postgres://ssl_user@localhost:5433/postgres", - TlsMode::Require(&handshake), - ).unwrap(); - conn.execute("SELECT 1::VARCHAR", &[]).unwrap(); -} - -#[test] -fn scram_user() { - let cert = include_bytes!("../../test/server.crt"); - let cert = Certificate::from_pem(cert).unwrap(); - - let mut builder = TlsConnector::builder(); - builder.add_root_certificate(cert); - let connector = builder.build().unwrap(); - - let handshake = NativeTls::with_connector(connector); - let conn = Connection::connect( - "postgres://scram_user:password@localhost:5433/postgres", - TlsMode::Require(&handshake), - ).unwrap(); - conn.execute("SELECT 1::VARCHAR", &[]).unwrap(); -} diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml deleted file mode 100644 index 53321fd56..000000000 --- a/postgres-openssl/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "postgres-openssl" -version = "0.1.0" -authors = ["Steven Fackler "] - -[dependencies] -openssl = "0.10.9" - -postgres = { version = "0.15", path = "../postgres" } diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs deleted file mode 100644 index 8b29d35ae..000000000 --- a/postgres-openssl/src/lib.rs +++ /dev/null @@ -1,100 +0,0 @@ -pub extern crate openssl; -extern crate postgres; - -use openssl::error::ErrorStack; -use openssl::hash::MessageDigest; -use openssl::nid::Nid; -use openssl::ssl::{ConnectConfiguration, SslConnector, SslMethod, 
SslStream}; -use postgres::tls::{Stream, TlsHandshake, TlsStream}; -use std::error::Error; -use std::fmt; -use std::io::{self, Read, Write}; - -#[cfg(test)] -mod test; - -pub struct OpenSsl { - connector: SslConnector, - config: Box Result<(), ErrorStack> + Sync + Send>, -} - -impl fmt::Debug for OpenSsl { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("OpenSsl").finish() - } -} - -impl OpenSsl { - pub fn new() -> Result { - let connector = SslConnector::builder(SslMethod::tls())?.build(); - Ok(OpenSsl::with_connector(connector)) - } - - pub fn with_connector(connector: SslConnector) -> OpenSsl { - OpenSsl { - connector, - config: Box::new(|_| Ok(())), - } - } - - pub fn callback(&mut self, f: F) - where - F: Fn(&mut ConnectConfiguration) -> Result<(), ErrorStack> + 'static + Sync + Send, - { - self.config = Box::new(f); - } -} - -impl TlsHandshake for OpenSsl { - fn tls_handshake( - &self, - domain: &str, - stream: Stream, - ) -> Result, Box> { - let mut ssl = self.connector.configure()?; - (self.config)(&mut ssl)?; - let stream = ssl.connect(domain, stream)?; - - Ok(Box::new(OpenSslStream(stream))) - } -} - -#[derive(Debug)] -struct OpenSslStream(SslStream); - -impl Read for OpenSslStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.0.read(buf) - } -} - -impl Write for OpenSslStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.0.flush() - } -} - -impl TlsStream for OpenSslStream { - fn get_ref(&self) -> &Stream { - self.0.get_ref() - } - - fn get_mut(&mut self) -> &mut Stream { - self.0.get_mut() - } - - fn tls_server_end_point(&self) -> Option> { - let cert = self.0.ssl().peer_certificate()?; - let algo_nid = cert.signature_algorithm().object().nid(); - let signature_algorithms = algo_nid.signature_algorithms()?; - let md = match signature_algorithms.digest { - Nid::MD5 | Nid::SHA1 => MessageDigest::sha256(), - nid => MessageDigest::from_nid(nid)?, - }; - cert.digest(md).ok().map(|b| b.to_vec()) - } -} diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs deleted file mode 100644 index 8314e179f..000000000 --- a/postgres-openssl/src/test.rs +++ /dev/null @@ -1,40 +0,0 @@ -use openssl::ssl::{SslConnector, SslMethod}; -use postgres::{Connection, TlsMode}; - -use OpenSsl; - -#[test] -fn require() { - let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); - builder.set_ca_file("../test/server.crt").unwrap(); - let negotiator = OpenSsl::with_connector(builder.build()); - let conn = Connection::connect( - "postgres://ssl_user@localhost:5433/postgres", - TlsMode::Require(&negotiator), - ).unwrap(); - conn.execute("SELECT 1::VARCHAR", &[]).unwrap(); -} - -#[test] -fn prefer() { - let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); - builder.set_ca_file("../test/server.crt").unwrap(); - let negotiator = OpenSsl::with_connector(builder.build()); - let conn = Connection::connect( - "postgres://ssl_user@localhost:5433/postgres", - TlsMode::Require(&negotiator), - ).unwrap(); - conn.execute("SELECT 1::VARCHAR", &[]).unwrap(); -} - -#[test] -fn scram_user() { - let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); - builder.set_ca_file("../test/server.crt").unwrap(); - let negotiator = OpenSsl::with_connector(builder.build()); - let conn = Connection::connect( - "postgres://scram_user:password@localhost:5433/postgres", - TlsMode::Require(&negotiator), - ).unwrap(); - conn.execute("SELECT 1::VARCHAR", 
&[]).unwrap(); -} diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml deleted file mode 100644 index 2a21d33a1..000000000 --- a/postgres/Cargo.toml +++ /dev/null @@ -1,66 +0,0 @@ -[package] -name = "postgres" -version = "0.15.2" -authors = ["Steven Fackler "] -license = "MIT" -description = "A native PostgreSQL driver" -repository = "https://github.com/sfackler/rust-postgres" -readme = "../README.md" -keywords = ["database", "postgres", "postgresql", "sql"] -include = ["src/*", "Cargo.toml", "LICENSE", "README.md", "THIRD_PARTY"] -categories = ["database"] - -[package.metadata.docs.rs] -features = [ - "with-bit-vec-0.5", - "with-chrono-0.4", - "with-eui48-0.3", - "with-geo-0.10", - "with-serde_json-1", - "with-uuid-0.6", - "with-openssl", - "with-native-tls", -] - -[badges] -circle-ci = { repository = "sfackler/rust-postgres" } - -[lib] -name = "postgres" -path = "src/lib.rs" -test = false -bench = false - -[[test]] -name = "test" -path = "tests/test.rs" - -[features] -"with-bit-vec-0.5" = ["postgres-shared/with-bit-vec-0.5"] -"with-chrono-0.4" = ["postgres-shared/with-chrono-0.4"] -"with-eui48-0.3" = ["postgres-shared/with-eui48-0.3"] -"with-geo-0.10" = ["postgres-shared/with-geo-0.10"] -"with-serde_json-1" = ["postgres-shared/with-serde_json-1"] -"with-uuid-0.6" = ["postgres-shared/with-uuid-0.6"] - -no-logging = [] - -[dependencies] -bytes = "0.4" -fallible-iterator = "0.1.3" -log = "0.4" -socket2 = { version = "0.3.5", features = ["unix"] } - -postgres-protocol = { version = "0.3.0", path = "../postgres-protocol" } -postgres-shared = { version = "0.4.1", path = "../postgres-shared" } - -[dev-dependencies] -hex = "0.3" -url = "1.0" - -bit-vec = "0.5" -chrono = "0.4" -eui48 = "0.3" -geo = "0.10" -serde_json = "1.0" -uuid = "0.6" diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs deleted file mode 100644 index dce024555..000000000 --- a/postgres/src/lib.rs +++ /dev/null @@ -1,1560 +0,0 @@ -//! A pure-Rust frontend for the popular PostgreSQL database. -//! -//! ```rust,no_run -//! extern crate postgres; -//! -//! use postgres::{Connection, TlsMode}; -//! -//! struct Person { -//! id: i32, -//! name: String, -//! data: Option> -//! } -//! -//! fn main() { -//! let conn = Connection::connect("postgresql://postgres@localhost:5433", TlsMode::None) -//! .unwrap(); -//! -//! conn.execute("CREATE TABLE person ( -//! id SERIAL PRIMARY KEY, -//! name VARCHAR NOT NULL, -//! data BYTEA -//! )", &[]).unwrap(); -//! let me = Person { -//! id: 0, -//! name: "Steven".to_owned(), -//! data: None -//! }; -//! conn.execute("INSERT INTO person (name, data) VALUES ($1, $2)", -//! &[&me.name, &me.data]).unwrap(); -//! -//! for row in &conn.query("SELECT id, name, data FROM person", &[]).unwrap() { -//! let person = Person { -//! id: row.get(0), -//! name: row.get(1), -//! data: row.get(2) -//! }; -//! println!("Found person {}", person.name); -//! } -//! } -//! ``` -//! -//! # SSL/TLS -//! -//! This crate supports TLS secured connections. The `TlsMode` enum is passed to connection methods -//! and indicates if the connection will not, may, or must be secured by TLS. The TLS implementation -//! is pluggable through the `TlsHandshake` trait. Implementations for OpenSSL, Secure Transport, -//! SChannel, and the `native-tls` crate are provided behind the `with-openssl`, -//! `with-security-framework`, `with-schannel`, and `with-native-tls` feature flags respectively. -//! -//! ## Examples -//! -//! Connecting using `native-tls`: -//! -//! ```no_run -//! extern crate postgres; -//! -//! 
use postgres::{Connection, TlsMode}; -//! # #[cfg(feature = "with-native-tls")] -//! use postgres::tls::native_tls::NativeTls; -//! -//! # #[cfg(not(feature = "with-native-tls"))] fn main() {} -//! # #[cfg(feature = "with-native-tls")] -//! fn main() { -//! let negotiator = NativeTls::new().unwrap(); -//! let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::Require(&negotiator)) -//! .unwrap(); -//! } -//! ``` -#![doc(html_root_url = "https://docs.rs/postgres/0.15.1")] -#![warn(missing_docs)] -#![allow(unknown_lints, needless_lifetimes, doc_markdown)] // for clippy - -extern crate bytes; -extern crate fallible_iterator; -#[cfg(not(feature = "no-logging"))] -#[macro_use] -extern crate log; -extern crate postgres_protocol; -extern crate postgres_shared; -extern crate socket2; - -use fallible_iterator::FallibleIterator; -use postgres_protocol::authentication; -use postgres_protocol::authentication::sasl::{self, ChannelBinding, ScramSha256}; -use postgres_protocol::message::backend::{self, ErrorFields}; -use postgres_protocol::message::frontend; -use postgres_shared::rows::RowData; -use std::cell::{Cell, RefCell}; -use std::collections::{HashMap, VecDeque}; -use std::fmt; -use std::io; -use std::mem; -use std::result; -use std::sync::Arc; -use std::time::Duration; - -use error::{DbError, SqlState}; -use notification::{Notification, Notifications}; -use params::{IntoConnectParams, User}; -use priv_io::MessageStream; -use rows::Rows; -use stmt::{Column, Statement}; -use text_rows::TextRows; -use tls::TlsHandshake; -use transaction::{IsolationLevel, Transaction}; -use types::{Field, FromSql, IsNull, Kind, Oid, ToSql, Type}; - -#[doc(inline)] -pub use error::Error; -#[doc(inline)] -pub use postgres_shared::CancelData; -#[doc(inline)] -pub use postgres_shared::{error, types}; - -#[macro_use] -mod macros; - -pub mod notification; -pub mod params; -mod priv_io; -pub mod rows; -pub mod stmt; -pub mod text_rows; -pub mod tls; -pub mod transaction; - -const TYPEINFO_QUERY: &'static str = "__typeinfo"; -const TYPEINFO_ENUM_QUERY: &'static str = "__typeinfo_enum"; -const TYPEINFO_COMPOSITE_QUERY: &'static str = "__typeinfo_composite"; - -/// A type alias of the result returned by many methods. -pub type Result = result::Result; - -/// A trait implemented by types that can handle Postgres notice messages. -/// -/// It is implemented for all `Send + FnMut(DbError)` closures. -pub trait HandleNotice: Send { - /// Handle a Postgres notice message - fn handle_notice(&mut self, notice: DbError); -} - -impl HandleNotice for F { - fn handle_notice(&mut self, notice: DbError) { - self(notice) - } -} - -/// A notice handler which logs at the `info` level. -/// -/// This is the default handler used by a `Connection`. -#[derive(Copy, Clone, Debug)] -pub struct LoggingNoticeHandler; - -impl HandleNotice for LoggingNoticeHandler { - fn handle_notice(&mut self, _notice: DbError) { - info!("{}: {}", _notice.severity, _notice.message); - } -} - -/// Attempts to cancel an in-progress query. -/// -/// The backend provides no information about whether a cancellation attempt -/// was successful or not. An error will only be returned if the driver was -/// unable to connect to the database. -/// -/// A `CancelData` object can be created via `Connection::cancel_data`. The -/// object can cancel any query made on that connection. -/// -/// Only the host and port of the connection info are used. See -/// `Connection::connect` for details of the `params` argument. 
-/// -/// # Example -/// -/// ```rust,no_run -/// # use postgres::{Connection, TlsMode}; -/// # use std::thread; -/// # let url = ""; -/// let conn = Connection::connect(url, TlsMode::None).unwrap(); -/// let cancel_data = conn.cancel_data(); -/// thread::spawn(move || { -/// conn.execute("SOME EXPENSIVE QUERY", &[]).unwrap(); -/// }); -/// postgres::cancel_query(url, TlsMode::None, &cancel_data).unwrap(); -/// ``` -pub fn cancel_query(params: T, tls: TlsMode, data: &CancelData) -> Result<()> -where - T: IntoConnectParams, -{ - let params = params.into_connect_params().map_err(error::connect)?; - let mut socket = priv_io::initialize_stream(¶ms, tls)?; - - let mut buf = vec![]; - frontend::cancel_request(data.process_id, data.secret_key, &mut buf); - socket.write_all(&buf)?; - socket.flush()?; - - Ok(()) -} - -fn bad_response() -> io::Error { - io::Error::new( - io::ErrorKind::InvalidInput, - "the server returned an unexpected response", - ) -} - -fn desynchronized() -> io::Error { - io::Error::new( - io::ErrorKind::Other, - "communication with the server has desynchronized due to an earlier IO error", - ) -} - -/// Specifies the TLS support requested for a new connection. -#[derive(Debug)] -pub enum TlsMode<'a> { - /// The connection will not use TLS. - None, - /// The connection will use TLS if the backend supports it. - Prefer(&'a TlsHandshake), - /// The connection must use TLS. - Require(&'a TlsHandshake), -} - -#[derive(Debug)] -struct StatementInfo { - name: String, - param_types: Vec, - columns: Vec, -} - -struct InnerConnection { - stream: MessageStream, - notice_handler: Box, - notifications: VecDeque, - cancel_data: CancelData, - unknown_types: HashMap, - cached_statements: HashMap>, - parameters: HashMap, - next_stmt_id: u32, - trans_depth: u32, - desynchronized: bool, - finished: bool, - has_typeinfo_query: bool, - has_typeinfo_enum_query: bool, - has_typeinfo_composite_query: bool, -} - -impl Drop for InnerConnection { - fn drop(&mut self) { - if !self.finished { - let _ = self.finish_inner(); - } - } -} - -impl InnerConnection { - fn connect(params: T, tls: TlsMode) -> Result - where - T: IntoConnectParams, - { - let params = params.into_connect_params().map_err(error::connect)?; - let stream = priv_io::initialize_stream(¶ms, tls)?; - - let user = match params.user() { - Some(user) => user, - None => { - return Err(error::connect( - "user missing from connection parameters".into(), - )); - } - }; - - let mut conn = InnerConnection { - stream: MessageStream::new(stream), - next_stmt_id: 0, - notice_handler: Box::new(LoggingNoticeHandler), - notifications: VecDeque::new(), - cancel_data: CancelData { - process_id: 0, - secret_key: 0, - }, - unknown_types: HashMap::new(), - cached_statements: HashMap::new(), - parameters: HashMap::new(), - desynchronized: false, - finished: false, - trans_depth: 0, - has_typeinfo_query: false, - has_typeinfo_enum_query: false, - has_typeinfo_composite_query: false, - }; - - let mut options = params.options().to_owned(); - options.push(("client_encoding".to_owned(), "UTF8".to_owned())); - // Postgres uses the value of TimeZone as the time zone for TIMESTAMP - // WITH TIME ZONE values. Timespec converts to GMT internally. 
- options.push(("timezone".to_owned(), "GMT".to_owned())); - // We have to clone here since we need the user again for auth - options.push(("user".to_owned(), user.name().to_owned())); - if let Some(database) = params.database() { - options.push(("database".to_owned(), database.to_owned())); - } - - let options = options.iter().map(|&(ref a, ref b)| (&**a, &**b)); - conn.stream - .write_message(|buf| frontend::startup_message(options, buf))?; - conn.stream.flush()?; - - conn.handle_auth(user)?; - - loop { - match conn.read_message()? { - backend::Message::BackendKeyData(body) => { - conn.cancel_data.process_id = body.process_id(); - conn.cancel_data.secret_key = body.secret_key(); - } - backend::Message::ReadyForQuery(_) => break, - backend::Message::ErrorResponse(body) => { - return Err(err(&mut body.fields())); - } - _ => return Err(bad_response().into()), - } - } - - Ok(conn) - } - - fn read_message_with_notification(&mut self) -> io::Result { - debug_assert!(!self.desynchronized); - loop { - match try_desync!(self, self.stream.read_message()) { - backend::Message::NoticeResponse(body) => { - if let Ok(err) = DbError::new(&mut body.fields()) { - self.notice_handler.handle_notice(err); - } - } - backend::Message::ParameterStatus(body) => { - self.parameters - .insert(body.name()?.to_owned(), body.value()?.to_owned()); - } - val => return Ok(val), - } - } - } - - fn read_message_with_notification_timeout( - &mut self, - timeout: Duration, - ) -> io::Result> { - debug_assert!(!self.desynchronized); - loop { - match try_desync!(self, self.stream.read_message_timeout(timeout)) { - Some(backend::Message::NoticeResponse(body)) => { - if let Ok(err) = Err(err(&mut body.fields())) { - self.notice_handler.handle_notice(err); - } - } - Some(backend::Message::ParameterStatus(body)) => { - self.parameters - .insert(body.name()?.to_owned(), body.value()?.to_owned()); - } - val => return Ok(val), - } - } - } - - fn read_message_with_notification_nonblocking( - &mut self, - ) -> io::Result> { - debug_assert!(!self.desynchronized); - loop { - match try_desync!(self, self.stream.read_message_nonblocking()) { - Some(backend::Message::NoticeResponse(body)) => { - if let Ok(err) = Err(err(&mut body.fields())) { - self.notice_handler.handle_notice(err); - } - } - Some(backend::Message::ParameterStatus(body)) => { - self.parameters - .insert(body.name()?.to_owned(), body.value()?.to_owned()); - } - val => return Ok(val), - } - } - } - - fn read_message(&mut self) -> io::Result { - loop { - match self.read_message_with_notification()? { - backend::Message::NotificationResponse(body) => { - self.notifications.push_back(Notification { - process_id: body.process_id(), - channel: body.channel()?.to_owned(), - payload: body.message()?.to_owned(), - }) - } - val => return Ok(val), - } - } - } - - fn handle_auth(&mut self, user: &User) -> Result<()> { - match self.read_message()? 
{ - backend::Message::AuthenticationOk => return Ok(()), - backend::Message::AuthenticationCleartextPassword => { - let pass = user.password().ok_or_else(|| { - error::connect("a password was requested but not provided".into()) - })?; - self.stream - .write_message(|buf| frontend::password_message(pass, buf))?; - self.stream.flush()?; - } - backend::Message::AuthenticationMd5Password(body) => { - let pass = user.password().ok_or_else(|| { - error::connect("a password was requested but not provided".into()) - })?; - let output = - authentication::md5_hash(user.name().as_bytes(), pass.as_bytes(), body.salt()); - self.stream - .write_message(|buf| frontend::password_message(&output, buf))?; - self.stream.flush()?; - } - backend::Message::AuthenticationSasl(body) => { - let mut has_scram = false; - let mut has_scram_plus = false; - let mut mechanisms = body.mechanisms(); - while let Some(mechanism) = mechanisms.next()? { - match mechanism { - sasl::SCRAM_SHA_256 => has_scram = true, - sasl::SCRAM_SHA_256_PLUS => has_scram_plus = true, - _ => {} - } - } - let channel_binding = self - .stream - .get_ref() - .tls_server_end_point() - .map(ChannelBinding::tls_server_end_point); - - let (channel_binding, mechanism) = if has_scram_plus { - match channel_binding { - Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS), - None => (ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), - } - } else if has_scram { - match channel_binding { - Some(_) => (ChannelBinding::unrequested(), sasl::SCRAM_SHA_256), - None => (ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), - } - } else { - return Err( - io::Error::new(io::ErrorKind::Other, "unsupported authentication").into(), - ); - }; - - let pass = user.password().ok_or_else(|| { - error::connect("a password was requested but not provided".into()) - })?; - - let mut scram = ScramSha256::new(pass.as_bytes(), channel_binding); - - self.stream.write_message(|buf| { - frontend::sasl_initial_response(mechanism, scram.message(), buf) - })?; - self.stream.flush()?; - - let body = match self.read_message()? { - backend::Message::AuthenticationSaslContinue(body) => body, - backend::Message::ErrorResponse(body) => return Err(err(&mut body.fields())), - _ => return Err(bad_response().into()), - }; - - scram.update(body.data())?; - - self.stream - .write_message(|buf| frontend::sasl_response(scram.message(), buf))?; - self.stream.flush()?; - - let body = match self.read_message()? { - backend::Message::AuthenticationSaslFinal(body) => body, - backend::Message::ErrorResponse(body) => return Err(err(&mut body.fields())), - _ => return Err(bad_response().into()), - }; - - scram.finish(body.data())?; - } - backend::Message::AuthenticationKerberosV5 - | backend::Message::AuthenticationScmCredential - | backend::Message::AuthenticationGss - | backend::Message::AuthenticationSspi => { - return Err( - io::Error::new(io::ErrorKind::Other, "unsupported authentication").into(), - ) - } - backend::Message::ErrorResponse(body) => return Err(err(&mut body.fields())), - _ => return Err(bad_response().into()), - } - - match self.read_message()? 
{ - backend::Message::AuthenticationOk => Ok(()), - backend::Message::ErrorResponse(body) => Err(err(&mut body.fields())), - _ => Err(bad_response().into()), - } - } - - fn set_notice_handler(&mut self, handler: Box) -> Box { - mem::replace(&mut self.notice_handler, handler) - } - - fn raw_prepare( - &mut self, - stmt_name: &str, - query: &str, - types: &[Option], - ) -> Result<(Vec, Vec)> { - debug!("preparing query with name `{}`: {}", stmt_name, query); - - self.stream.write_message(|buf| { - frontend::parse( - stmt_name, - query, - types.iter().map(|t| t.as_ref().map_or(0, |t| t.oid())), - buf, - ) - })?; - self.stream - .write_message(|buf| frontend::describe(b'S', stmt_name, buf))?; - self.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - self.stream.flush()?; - - match self.read_message()? { - backend::Message::ParseComplete => {} - backend::Message::ErrorResponse(body) => { - self.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - _ => bad_response!(self), - } - - let raw_param_types = match self.read_message()? { - backend::Message::ParameterDescription(body) => body, - _ => bad_response!(self), - }; - - let raw_columns = match self.read_message()? { - backend::Message::RowDescription(body) => Some(body), - backend::Message::NoData => None, - _ => bad_response!(self), - }; - - self.wait_for_ready()?; - - let param_types = raw_param_types - .parameters() - .map_err(Into::into) - .and_then(|oid| self.get_type(oid)) - .collect()?; - - let columns = self.parse_cols(raw_columns)?; - Ok((param_types, columns)) - } - - fn read_rows(&mut self, mut consumer: F) -> Result - where - F: FnMut(RowData), - { - let more_rows; - loop { - match self.read_message()? { - backend::Message::EmptyQueryResponse | backend::Message::CommandComplete(_) => { - more_rows = false; - break; - } - backend::Message::PortalSuspended => { - more_rows = true; - break; - } - backend::Message::DataRow(body) => consumer(RowData::new(body)?), - backend::Message::ErrorResponse(body) => { - self.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - backend::Message::CopyInResponse(_) => { - self.stream.write_message(|buf| { - frontend::copy_fail("COPY queries cannot be directly executed", buf) - })?; - self.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - self.stream.flush()?; - } - backend::Message::CopyOutResponse(_) => { - loop { - if let backend::Message::ReadyForQuery(_) = self.read_message()? 
{ - break; - } - } - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "COPY queries cannot be directly \ - executed", - ).into()); - } - _ => { - self.desynchronized = true; - return Err(bad_response().into()); - } - } - } - self.wait_for_ready()?; - Ok(more_rows) - } - - fn raw_execute( - &mut self, - stmt_name: &str, - portal_name: &str, - row_limit: i32, - param_types: &[Type], - params: &[&ToSql], - ) -> Result<()> { - assert!( - param_types.len() == params.len(), - "expected {} parameters but got {}", - param_types.len(), - params.len() - ); - debug!( - "executing statement {} with parameters: {:?}", - stmt_name, params - ); - - { - let r = self.stream.write_message(|buf| { - frontend::bind( - portal_name, - stmt_name, - Some(1), - params.iter().zip(param_types), - |(param, ty), buf| match param.to_sql_checked(ty, buf) { - Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), - Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), - Err(e) => Err(e), - }, - Some(1), - buf, - ) - }); - match r { - Ok(()) => {} - Err(frontend::BindError::Conversion(e)) => { - return Err(error::conversion(e)); - } - Err(frontend::BindError::Serialization(e)) => return Err(e.into()), - } - } - - self.stream - .write_message(|buf| frontend::execute(portal_name, row_limit, buf))?; - self.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - self.stream.flush()?; - - match self.read_message()? { - backend::Message::BindComplete => Ok(()), - backend::Message::ErrorResponse(body) => { - self.wait_for_ready()?; - Err(err(&mut body.fields())) - } - _ => { - self.desynchronized = true; - Err(bad_response().into()) - } - } - } - - fn make_stmt_name(&mut self) -> String { - let stmt_name = format!("s{}", self.next_stmt_id); - self.next_stmt_id += 1; - stmt_name - } - - fn prepare_typed<'a>( - &mut self, - query: &str, - types: &[Option], - conn: &'a Connection, - ) -> Result> { - let stmt_name = self.make_stmt_name(); - let (param_types, columns) = self.raw_prepare(&stmt_name, query, types)?; - let info = Arc::new(StatementInfo { - name: stmt_name, - param_types: param_types, - columns: columns, - }); - Ok(Statement::new(conn, info, Cell::new(0), false)) - } - - fn prepare_cached<'a>(&mut self, query: &str, conn: &'a Connection) -> Result> { - let info = self.cached_statements.get(query).cloned(); - - let info = match info { - Some(info) => info, - None => { - let stmt_name = self.make_stmt_name(); - let (param_types, columns) = self.raw_prepare(&stmt_name, query, &[])?; - let info = Arc::new(StatementInfo { - name: stmt_name, - param_types: param_types, - columns: columns, - }); - self.cached_statements - .insert(query.to_owned(), info.clone()); - info - } - }; - - Ok(Statement::new(conn, info, Cell::new(0), true)) - } - - fn close_statement(&mut self, name: &str, type_: u8) -> Result<()> { - self.stream - .write_message(|buf| frontend::close(type_, name, buf))?; - self.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - self.stream.flush()?; - let resp = match self.read_message()? 
{ - backend::Message::CloseComplete => Ok(()), - backend::Message::ErrorResponse(body) => Err(err(&mut body.fields())), - _ => bad_response!(self), - }; - self.wait_for_ready()?; - resp - } - - fn get_type(&mut self, oid: Oid) -> Result { - if let Some(ty) = Type::from_oid(oid) { - return Ok(ty); - } - - if let Some(ty) = self.unknown_types.get(&oid) { - return Ok(ty.clone()); - } - - let ty = self.read_type(oid)?; - self.unknown_types.insert(oid, ty.clone()); - Ok(ty) - } - - fn parse_cols(&mut self, raw: Option) -> Result> { - match raw { - Some(body) => body - .fields() - .and_then(|field| { - Ok(Column::new( - field.name().to_owned(), - self.get_type(field.type_oid())?, - )) - }).collect() - .map_err(From::from), - None => Ok(vec![]), - } - } - - fn setup_typeinfo_query(&mut self) -> Result<()> { - if self.has_typeinfo_query { - return Ok(()); - } - - match self.raw_prepare( - TYPEINFO_QUERY, - "SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, \ - t.typbasetype, n.nspname, t.typrelid \ - FROM pg_catalog.pg_type t \ - LEFT OUTER JOIN pg_catalog.pg_range r ON \ - r.rngtypid = t.oid \ - INNER JOIN pg_catalog.pg_namespace n ON \ - t.typnamespace = n.oid \ - WHERE t.oid = $1", - &[], - ) { - Ok(..) => {} - // Range types weren't added until Postgres 9.2, so pg_range may not exist - Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => { - self.raw_prepare( - TYPEINFO_QUERY, - "SELECT t.typname, t.typtype, t.typelem, NULL::OID, \ - t.typbasetype, n.nspname, t.typrelid \ - FROM pg_catalog.pg_type t \ - INNER JOIN pg_catalog.pg_namespace n \ - ON t.typnamespace = n.oid \ - WHERE t.oid = $1", - &[], - )?; - } - Err(e) => return Err(e), - } - - self.has_typeinfo_query = true; - Ok(()) - } - - #[allow(if_not_else)] - fn read_type(&mut self, oid: Oid) -> Result { - self.setup_typeinfo_query()?; - self.raw_execute(TYPEINFO_QUERY, "", 0, &[Type::OID], &[&oid])?; - let mut row = None; - self.read_rows(|r| row = Some(r))?; - - let get_raw = |i: usize| row.as_ref().and_then(|r| r.get(i)); - - let (name, type_, elem_oid, rngsubtype, basetype, schema, relid) = { - let name = - String::from_sql_nullable(&Type::NAME, get_raw(0)).map_err(error::conversion)?; - let type_ = - i8::from_sql_nullable(&Type::CHAR, get_raw(1)).map_err(error::conversion)?; - let elem_oid = - Oid::from_sql_nullable(&Type::OID, get_raw(2)).map_err(error::conversion)?; - let rngsubtype = Option::::from_sql_nullable(&Type::OID, get_raw(3)) - .map_err(error::conversion)?; - let basetype = - Oid::from_sql_nullable(&Type::OID, get_raw(4)).map_err(error::conversion)?; - let schema = - String::from_sql_nullable(&Type::NAME, get_raw(5)).map_err(error::conversion)?; - let relid = - Oid::from_sql_nullable(&Type::OID, get_raw(6)).map_err(error::conversion)?; - (name, type_, elem_oid, rngsubtype, basetype, schema, relid) - }; - - let kind = if type_ == b'e' as i8 { - Kind::Enum(self.read_enum_variants(oid)?) - } else if type_ == b'p' as i8 { - Kind::Pseudo - } else if basetype != 0 { - Kind::Domain(self.get_type(basetype)?) - } else if elem_oid != 0 { - Kind::Array(self.get_type(elem_oid)?) - } else if relid != 0 { - Kind::Composite(self.read_composite_fields(relid)?) 
- } else { - match rngsubtype { - Some(oid) => Kind::Range(self.get_type(oid)?), - None => Kind::Simple, - } - }; - - Ok(Type::_new(name, oid, kind, schema)) - } - - fn setup_typeinfo_enum_query(&mut self) -> Result<()> { - if self.has_typeinfo_enum_query { - return Ok(()); - } - - match self.raw_prepare( - TYPEINFO_ENUM_QUERY, - "SELECT enumlabel \ - FROM pg_catalog.pg_enum \ - WHERE enumtypid = $1 \ - ORDER BY enumsortorder", - &[], - ) { - Ok(..) => {} - // Postgres 9.0 doesn't have enumsortorder - Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => { - self.raw_prepare( - TYPEINFO_ENUM_QUERY, - "SELECT enumlabel \ - FROM pg_catalog.pg_enum \ - WHERE enumtypid = $1 \ - ORDER BY oid", - &[], - )?; - } - Err(e) => return Err(e), - } - - self.has_typeinfo_enum_query = true; - Ok(()) - } - - fn read_enum_variants(&mut self, oid: Oid) -> Result> { - self.setup_typeinfo_enum_query()?; - self.raw_execute(TYPEINFO_ENUM_QUERY, "", 0, &[Type::OID], &[&oid])?; - let mut rows = vec![]; - self.read_rows(|row| rows.push(row))?; - - let mut variants = vec![]; - for row in rows { - variants.push( - String::from_sql_nullable(&Type::NAME, row.get(0)).map_err(error::conversion)?, - ); - } - - Ok(variants) - } - - fn setup_typeinfo_composite_query(&mut self) -> Result<()> { - if self.has_typeinfo_composite_query { - return Ok(()); - } - - self.raw_prepare( - TYPEINFO_COMPOSITE_QUERY, - "SELECT attname, atttypid \ - FROM pg_catalog.pg_attribute \ - WHERE attrelid = $1 \ - AND NOT attisdropped \ - AND attnum > 0 \ - ORDER BY attnum", - &[], - )?; - - self.has_typeinfo_composite_query = true; - Ok(()) - } - - fn read_composite_fields(&mut self, relid: Oid) -> Result> { - self.setup_typeinfo_composite_query()?; - self.raw_execute(TYPEINFO_COMPOSITE_QUERY, "", 0, &[Type::OID], &[&relid])?; - let mut rows = vec![]; - self.read_rows(|row| rows.push(row))?; - - let mut fields = vec![]; - for row in rows { - let (name, type_) = { - let name = String::from_sql_nullable(&Type::NAME, row.get(0)) - .map_err(error::conversion)?; - let type_ = - Oid::from_sql_nullable(&Type::OID, row.get(1)).map_err(error::conversion)?; - (name, type_) - }; - let type_ = self.get_type(type_)?; - fields.push(Field::new(name, type_)); - } - - Ok(fields) - } - - fn is_desynchronized(&self) -> bool { - self.desynchronized - } - - #[allow(needless_return)] - fn wait_for_ready(&mut self) -> Result<()> { - match self.read_message()? { - backend::Message::ReadyForQuery(_) => Ok(()), - _ => bad_response!(self), - } - } - - fn simple_query_(&mut self, query: &str) -> Result> { - check_desync!(self); - debug!("executing query: {}", query); - self.stream - .write_message(|buf| frontend::query(query, buf))?; - self.stream.flush()?; - - let mut result = vec![]; - let mut rows = vec![]; - let mut columns = None; - - loop { - match self.read_message()? 
{ - backend::Message::ReadyForQuery(_) => break, - backend::Message::DataRow(body) => { - rows.push(RowData::new(body)?); - } - backend::Message::CopyInResponse(_) => { - self.stream.write_message(|buf| { - frontend::copy_fail("COPY queries cannot be directly executed", buf) - })?; - self.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - self.stream.flush()?; - } - backend::Message::ErrorResponse(body) => { - self.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - backend::Message::RowDescription(body) => { - columns = Some(self.parse_cols(Some(body))?); - } - backend::Message::CommandComplete(_) => { - if let Some(cols) = columns.take() { - result.push(TextRows::new(cols, mem::replace(&mut rows, Vec::new()))); - } - } - _ => bad_response!(self), - } - } - Ok(result) - } - - fn quick_query(&mut self, query: &str) -> Result>>> { - check_desync!(self); - debug!("executing query: {}", query); - self.stream - .write_message(|buf| frontend::query(query, buf))?; - self.stream.flush()?; - - let mut result = vec![]; - loop { - match self.read_message()? { - backend::Message::ReadyForQuery(_) => break, - backend::Message::DataRow(body) => { - let row = body - .ranges() - .map(|r| r.map(|r| String::from_utf8_lossy(&body.buffer()[r]).into_owned())) - .collect()?; - result.push(row); - } - backend::Message::CopyInResponse(_) => { - self.stream.write_message(|buf| { - frontend::copy_fail("COPY queries cannot be directly executed", buf) - })?; - self.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - self.stream.flush()?; - } - backend::Message::ErrorResponse(body) => { - self.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - _ => {} - } - } - Ok(result) - } - - fn finish_inner(&mut self) -> Result<()> { - check_desync!(self); - self.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::terminate(buf)))?; - self.stream.flush()?; - Ok(()) - } -} - -fn _ensure_send() { - fn _is_send() {} - _is_send::(); -} - -/// A connection to a Postgres database. -pub struct Connection(RefCell); - -impl fmt::Debug for Connection { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let conn = self.0.borrow(); - fmt.debug_struct("Connection") - .field("stream", &conn.stream.get_ref()) - .field("cancel_data", &conn.cancel_data) - .field("notifications", &conn.notifications.len()) - .field("transaction_depth", &conn.trans_depth) - .field("desynchronized", &conn.desynchronized) - .field("cached_statements", &conn.cached_statements.len()) - .finish() - } -} - -impl Connection { - /// Creates a new connection to a Postgres database. - /// - /// Most applications can use a URL string in the normal format: - /// - /// ```notrust - /// postgresql://user[:password]@host[:port][/database][?param1=val1[[¶m2=val2]...]] - /// ``` - /// - /// The password may be omitted if not required. The default Postgres port - /// (5432) is used if none is specified. The database name defaults to the - /// username if not specified. - /// - /// To connect to the server via Unix sockets, `host` should be set to the - /// absolute path of the directory containing the socket file. Since `/` is - /// a reserved character in URLs, the path should be URL encoded. If the - /// path contains non-UTF 8 characters, a `ConnectParams` struct should be - /// created manually and passed in. Note that Postgres does not support TLS - /// over Unix sockets. 
- /// - /// # Examples - /// - /// To connect over TCP: - /// - /// ```rust,no_run - /// use postgres::{Connection, TlsMode}; - /// - /// let url = "postgresql://postgres:hunter2@localhost:5433:2994/foodb"; - /// let conn = Connection::connect(url, TlsMode::None).unwrap(); - /// ``` - /// - /// To connect over a Unix socket located in `/run/postgres`: - /// - /// ```rust,no_run - /// use postgres::{Connection, TlsMode}; - /// - /// let url = "postgresql://postgres@%2Frun%2Fpostgres"; - /// let conn = Connection::connect(url, TlsMode::None).unwrap(); - /// ``` - /// - /// To connect with a manually constructed `ConnectParams`: - /// - /// ```rust,no_run - /// use postgres::{Connection, TlsMode}; - /// use postgres::params::{ConnectParams, Host}; - /// # use std::path::PathBuf; - /// - /// # #[cfg(unix)] - /// # fn f() { - /// # let some_crazy_path = PathBuf::new(); - /// let params = ConnectParams::builder() - /// .user("postgres", None) - /// .build(Host::Unix(some_crazy_path)); - /// let conn = Connection::connect(params, TlsMode::None).unwrap(); - /// # } - /// ``` - pub fn connect(params: T, tls: TlsMode) -> Result - where - T: IntoConnectParams, - { - InnerConnection::connect(params, tls).map(|conn| Connection(RefCell::new(conn))) - } - - /// Executes a statement, returning the number of rows modified. - /// - /// A statement may contain parameters, specified by `$n` where `n` is the - /// index of the parameter in the list provided, 1-indexed. - /// - /// If the statement does not modify any rows (e.g. SELECT), 0 is returned. - /// - /// If the same statement will be repeatedly executed (perhaps with - /// different query parameters), consider using the `prepare` and - /// `prepare_cached` methods. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number - /// expected. - /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// # let bar = 1i32; - /// # let baz = true; - /// let rows_updated = conn.execute("UPDATE foo SET bar = $1 WHERE baz = $2", &[&bar, &baz]) - /// .unwrap(); - /// println!("{} rows updated", rows_updated); - /// ``` - pub fn execute(&self, query: &str, params: &[&ToSql]) -> Result { - let (param_types, columns) = self.0.borrow_mut().raw_prepare("", query, &[])?; - let info = Arc::new(StatementInfo { - name: String::new(), - param_types: param_types, - columns: columns, - }); - let stmt = Statement::new(self, info, Cell::new(0), true); - stmt.execute(params) - } - - /// Executes a statement, returning the resulting rows. - /// - /// A statement may contain parameters, specified by `$n` where `n` is the - /// index of the parameter in the list provided, 1-indexed. - /// - /// If the same statement will be repeatedly executed (perhaps with - /// different query parameters), consider using the `prepare` and - /// `prepare_cached` methods. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number - /// expected. 
- /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// # let baz = true; - /// for row in &conn.query("SELECT foo FROM bar WHERE baz = $1", &[&baz]).unwrap() { - /// let foo: i32 = row.get("foo"); - /// println!("foo: {}", foo); - /// } - /// ``` - pub fn query(&self, query: &str, params: &[&ToSql]) -> Result { - let (param_types, columns) = self.0.borrow_mut().raw_prepare("", query, &[])?; - let info = Arc::new(StatementInfo { - name: String::new(), - param_types: param_types, - columns: columns, - }); - let stmt = Statement::new(self, info, Cell::new(0), true); - stmt.into_query(params) - } - - /// Begins a new transaction. - /// - /// Returns a `Transaction` object which should be used instead of - /// the connection for the duration of the transaction. The transaction - /// is active until the `Transaction` object falls out of scope. - /// - /// # Note - /// A transaction will roll back by default. The `set_commit`, - /// `set_rollback`, and `commit` methods alter this behavior. - /// - /// # Panics - /// - /// Panics if a transaction is already active. - /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// let trans = conn.transaction().unwrap(); - /// trans.execute("UPDATE foo SET bar = 10", &[]).unwrap(); - /// // ... - /// - /// trans.commit().unwrap(); - /// ``` - pub fn transaction<'a>(&'a self) -> Result> { - self.transaction_with(&transaction::Config::new()) - } - - /// Begins a new transaction with the specified configuration. - pub fn transaction_with<'a>(&'a self, config: &transaction::Config) -> Result> { - let mut conn = self.0.borrow_mut(); - check_desync!(conn); - assert!( - conn.trans_depth == 0, - "`transaction` must be called on the active transaction" - ); - let mut query = "BEGIN".to_owned(); - config.build_command(&mut query); - conn.quick_query(&query)?; - conn.trans_depth += 1; - Ok(Transaction::new(self, 1)) - } - - /// Creates a new prepared statement. - /// - /// If the same statement will be executed repeatedly, explicitly preparing - /// it can improve performance. - /// - /// The statement is associated with the connection that created it and may - /// not outlive that connection. - /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let x = 10i32; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// # let (a, b) = (0i32, 1i32); - /// # let updates = vec![(&a, &b)]; - /// let stmt = conn.prepare("UPDATE foo SET bar = $1 WHERE baz = $2").unwrap(); - /// for (bar, baz) in updates { - /// stmt.execute(&[bar, baz]).unwrap(); - /// } - /// ``` - pub fn prepare<'a>(&'a self, query: &str) -> Result> { - self.prepare_typed(query, &[]) - } - - /// Like `prepare`, but allows for the types of query parameters to be explicitly specified. - /// - /// Postgres will normally infer the types of parameters, but this function offers more control - /// of that behavior. `None` will cause Postgres to infer the type. The list of types can be - /// shorter than the number of parameters in the query; it will act as if padded out with `None` - /// values. 
- /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # use postgres::types::Type; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// // $1 would normally be assigned the type INT4, but we can override that to INT8 - /// let stmt = conn.prepare_typed("SELECT $1::INT4", &[Some(Type::INT8)]).unwrap(); - /// assert_eq!(stmt.param_types()[0], Type::INT8); - /// ``` - pub fn prepare_typed<'a>( - &'a self, - query: &str, - types: &[Option], - ) -> Result> { - self.0.borrow_mut().prepare_typed(query, types, self) - } - - /// Creates a cached prepared statement. - /// - /// Like `prepare`, except that the statement is only prepared once over - /// the lifetime of the connection and then cached. If the same statement - /// is going to be prepared frequently, caching it can improve performance - /// by reducing the number of round trips to the Postgres backend. - /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let x = 10i32; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// # let (a, b) = (0i32, 1i32); - /// # let updates = vec![(&a, &b)]; - /// let stmt = conn.prepare_cached("UPDATE foo SET bar = $1 WHERE baz = $2").unwrap(); - /// for (bar, baz) in updates { - /// stmt.execute(&[bar, baz]).unwrap(); - /// } - /// ``` - pub fn prepare_cached<'a>(&'a self, query: &str) -> Result> { - self.0.borrow_mut().prepare_cached(query, self) - } - - /// Returns the isolation level which will be used for future transactions. - /// - /// This is a simple wrapper around `SHOW TRANSACTION ISOLATION LEVEL`. - pub fn transaction_isolation(&self) -> Result { - let mut conn = self.0.borrow_mut(); - check_desync!(conn); - let result = conn.quick_query("SHOW TRANSACTION ISOLATION LEVEL")?; - IsolationLevel::new(result[0][0].as_ref().unwrap()) - } - - /// Sets the configuration that will be used for future transactions. - pub fn set_transaction_config(&self, config: &transaction::Config) -> Result<()> { - let mut command = "SET SESSION CHARACTERISTICS AS TRANSACTION".to_owned(); - config.build_command(&mut command); - self.simple_query(&command).map(|_| ()) - } - - /// Execute a sequence of SQL statements. - /// - /// Statements should be separated by `;` characters. If an error occurs, - /// execution of the sequence will stop at that point. This is intended for - /// execution of batches of non-dynamic statements - for example, creation - /// of a schema for a fresh database. - /// - /// # Warning - /// - /// Prepared statements should be used for any SQL statement which contains - /// user-specified data, as it provides functionality to safely embed that - /// data in the statement. Do not form statements via string concatenation - /// and feed them into this method. 
- /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode, Result}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// conn.batch_execute(" - /// CREATE TABLE person ( - /// id SERIAL PRIMARY KEY, - /// name NOT NULL - /// ); - /// - /// CREATE TABLE purchase ( - /// id SERIAL PRIMARY KEY, - /// person INT NOT NULL REFERENCES person (id), - /// time TIMESTAMPTZ NOT NULL, - /// ); - /// - /// CREATE INDEX ON purchase (time); - /// ").unwrap(); - /// ``` - #[deprecated(since = "0.15.3", note = "please use `simple_query` instead")] - pub fn batch_execute(&self, query: &str) -> Result<()> { - self.0.borrow_mut().quick_query(query).map(|_| ()) - } - - /// Send a simple, non-prepared query - /// - /// Executes a query without making a prepared statement. All result columns - /// are returned in a UTF-8 text format rather than compact binary - /// representations. This can be useful when communicating with services - /// like _pgbouncer_ which speak "basic" postgres but don't support prepared - /// statements. - /// - /// Because rust-postgres' query parameter substitution relies on prepared - /// statements, it's not possible to pass a separate parameters list with - /// this API. - /// - /// In general, the `query` API should be preferred whenever possible. - /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// for response in &conn.simple_query("SELECT foo FROM bar WHERE baz = 'quux'").unwrap() { - /// for row in response { - /// let foo: &str = row.get("foo"); - /// println!("foo: {}", foo); - /// } - /// } - /// ``` - pub fn simple_query(&self, query: &str) -> Result> { - self.0.borrow_mut().simple_query_(query) - } - - /// Returns a structure providing access to asynchronous notifications. - /// - /// Use the `LISTEN` command to register this connection for notifications. - pub fn notifications<'a>(&'a self) -> Notifications<'a> { - Notifications::new(self) - } - - /// Returns information used to cancel pending queries. - /// - /// Used with the `cancel_query` function. The object returned can be used - /// to cancel any query executed by the connection it was created from. - pub fn cancel_data(&self) -> CancelData { - self.0.borrow().cancel_data - } - - /// Returns the value of the specified Postgres backend parameter, such as - /// `timezone` or `server_version`. - pub fn parameter(&self, param: &str) -> Option { - self.0.borrow().parameters.get(param).cloned() - } - - /// Sets the notice handler for the connection, returning the old handler. - pub fn set_notice_handler(&self, handler: Box) -> Box { - self.0.borrow_mut().set_notice_handler(handler) - } - - /// Returns whether or not the stream has been desynchronized due to an - /// error in the communication channel with the server. - /// - /// If this has occurred, all further queries will immediately return an - /// error. - pub fn is_desynchronized(&self) -> bool { - self.0.borrow().is_desynchronized() - } - - /// Determines if the `Connection` is currently "active", that is, if there - /// are no active transactions. - /// - /// The `transaction` method can only be called on the active `Connection` - /// or `Transaction`. - pub fn is_active(&self) -> bool { - self.0.borrow().trans_depth == 0 - } - - /// Consumes the connection, closing it. 
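[Editor's note: the `notifications` accessor and the `LISTEN` mechanism described in the removed documentation above are the one part of this API that the deleted docs never illustrate. The following is a minimal sketch of how that API was typically used, based only on the 0.15 signatures visible in this diff; the connection URL and the channel name `jobs` are illustrative placeholders, not values from the patch.]

```rust
extern crate fallible_iterator;
extern crate postgres;

use fallible_iterator::FallibleIterator;
use postgres::{Connection, TlsMode};

fn main() {
    // Connect with the synchronous API being removed in this patch.
    let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap();

    // Register interest in a channel; "jobs" is a hypothetical name.
    conn.execute("LISTEN jobs", &[]).unwrap();

    // `notifications()` borrows the connection, and `blocking_iter()` yields a
    // fallible iterator that waits until a notification arrives.
    let notifications = conn.notifications();
    let mut iter = notifications.blocking_iter();
    if let Some(note) = iter.next().unwrap() {
        println!("received on {}: {}", note.channel, note.payload);
    }
}
```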
- /// - /// Functionally equivalent to the `Drop` implementation for `Connection` - /// except that it returns any error encountered to the caller. - pub fn finish(self) -> Result<()> { - let mut conn = self.0.borrow_mut(); - conn.finished = true; - conn.finish_inner() - } -} - -/// A trait allowing abstraction over connections and transactions -pub trait GenericConnection { - /// Like `Connection::execute`. - fn execute(&self, query: &str, params: &[&ToSql]) -> Result; - - /// Like `Connection::query`. - fn query<'a>(&'a self, query: &str, params: &[&ToSql]) -> Result; - - /// Like `Connection::prepare`. - fn prepare<'a>(&'a self, query: &str) -> Result>; - - /// Like `Connection::prepare_cached`. - fn prepare_cached<'a>(&'a self, query: &str) -> Result>; - - /// Like `Connection::transaction`. - fn transaction<'a>(&'a self) -> Result>; - - /// Like `Connection::batch_execute`. - #[deprecated(since = "0.15.3", note = "please use `simple_query` instead")] - fn batch_execute(&self, query: &str) -> Result<()>; - - /// Like `Connection::is_active`. - fn is_active(&self) -> bool; - - /// Like `Connection::simple_query`. - fn simple_query(&self, query: &str) -> Result>; -} - -impl GenericConnection for Connection { - fn execute(&self, query: &str, params: &[&ToSql]) -> Result { - self.execute(query, params) - } - - fn query<'a>(&'a self, query: &str, params: &[&ToSql]) -> Result { - self.query(query, params) - } - - fn prepare<'a>(&'a self, query: &str) -> Result> { - self.prepare(query) - } - - fn prepare_cached<'a>(&'a self, query: &str) -> Result> { - self.prepare_cached(query) - } - - fn transaction<'a>(&'a self) -> Result> { - self.transaction() - } - - fn batch_execute(&self, query: &str) -> Result<()> { - self.simple_query(query).map(|_| ()) - } - - fn is_active(&self) -> bool { - self.is_active() - } - - fn simple_query(&self, query: &str) -> Result> { - self.simple_query(query) - } -} - -impl<'a> GenericConnection for Transaction<'a> { - fn execute(&self, query: &str, params: &[&ToSql]) -> Result { - self.execute(query, params) - } - - fn query<'b>(&'b self, query: &str, params: &[&ToSql]) -> Result { - self.query(query, params) - } - - fn prepare<'b>(&'b self, query: &str) -> Result> { - self.prepare(query) - } - - fn prepare_cached<'b>(&'b self, query: &str) -> Result> { - self.prepare_cached(query) - } - - fn transaction<'b>(&'b self) -> Result> { - self.transaction() - } - - fn batch_execute(&self, query: &str) -> Result<()> { - self.simple_query(query).map(|_| ()) - } - - fn simple_query(&self, query: &str) -> Result> { - self.simple_query(query) - } - - fn is_active(&self) -> bool { - self.is_active() - } -} - -fn err(fields: &mut ErrorFields) -> Error { - match DbError::new(fields) { - Ok(err) => error::db(err), - Err(err) => err.into(), - } -} diff --git a/postgres/src/macros.rs b/postgres/src/macros.rs deleted file mode 100644 index 939a0344a..000000000 --- a/postgres/src/macros.rs +++ /dev/null @@ -1,69 +0,0 @@ -macro_rules! try_desync { - ($s:expr, $e:expr) => ( - match $e { - Ok(ok) => ok, - Err(err) => { - $s.desynchronized = true; - return Err(::std::convert::From::from(err)); - } - } - ) -} - -macro_rules! check_desync { - ($e:expr) => ({ - if $e.is_desynchronized() { - return Err(::desynchronized().into()); - } - }) -} - -macro_rules! bad_response { - ($s:expr) => {{ - debug!("Bad response at {}:{}", file!(), line!()); - $s.desynchronized = true; - return Err(::bad_response().into()); - }}; -} - -#[cfg(feature = "no-logging")] -macro_rules! 
debug { - ($($t:tt)*) => {}; -} - -#[cfg(feature = "no-logging")] -macro_rules! info { - ($($t:tt)*) => {}; -} - -/// Generates a simple implementation of `ToSql::accepts` which accepts the -/// types passed to it. -#[macro_export] -macro_rules! accepts { - ($($expected:pat),+) => ( - fn accepts(ty: &$crate::types::Type) -> bool { - match *ty { - $($expected)|+ => true, - _ => false - } - } - ) -} - -/// Generates an implementation of `ToSql::to_sql_checked`. -/// -/// All `ToSql` implementations should use this macro. -#[macro_export] -macro_rules! to_sql_checked { - () => { - fn to_sql_checked(&self, - ty: &$crate::types::Type, - out: &mut ::std::vec::Vec) - -> ::std::result::Result<$crate::types::IsNull, - Box<::std::error::Error + - ::std::marker::Sync + - ::std::marker::Send>> { - $crate::types::__to_sql_checked(self, ty, out) - } - } -} diff --git a/postgres/src/notification.rs b/postgres/src/notification.rs deleted file mode 100644 index 73eb98626..000000000 --- a/postgres/src/notification.rs +++ /dev/null @@ -1,203 +0,0 @@ -//! Asynchronous notifications. - -use error::DbError; -use fallible_iterator::{FallibleIterator, IntoFallibleIterator}; -use postgres_protocol::message::backend::{self, ErrorFields}; -use std::fmt; -use std::time::Duration; - -#[doc(inline)] -use postgres_shared; -pub use postgres_shared::Notification; - -use error::Error; -use {desynchronized, Connection, Result}; - -/// Notifications from the Postgres backend. -pub struct Notifications<'conn> { - conn: &'conn Connection, -} - -impl<'a> fmt::Debug for Notifications<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Notifications") - .field("pending", &self.len()) - .finish() - } -} - -impl<'conn> Notifications<'conn> { - pub(crate) fn new(conn: &'conn Connection) -> Notifications<'conn> { - Notifications { conn: conn } - } - - /// Returns the number of pending notifications. - pub fn len(&self) -> usize { - self.conn.0.borrow().notifications.len() - } - - /// Determines if there are any pending notifications. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns a fallible iterator over pending notifications. - /// - /// # Note - /// - /// This iterator may start returning `Some` after previously returning - /// `None` if more notifications are received. - pub fn iter<'a>(&'a self) -> Iter<'a> { - Iter { conn: self.conn } - } - - /// Returns a fallible iterator over notifications that blocks until one is - /// received if none are pending. - /// - /// The iterator will never return `None`. - pub fn blocking_iter<'a>(&'a self) -> BlockingIter<'a> { - BlockingIter { conn: self.conn } - } - - /// Returns a fallible iterator over notifications that blocks for a limited - /// time waiting to receive one if none are pending. - /// - /// # Note - /// - /// This iterator may start returning `Some` after previously returning - /// `None` if more notifications are received. - pub fn timeout_iter<'a>(&'a self, timeout: Duration) -> TimeoutIter<'a> { - TimeoutIter { - conn: self.conn, - timeout: timeout, - } - } -} - -impl<'a, 'conn> IntoFallibleIterator for &'a Notifications<'conn> { - type Item = Notification; - type Error = Error; - type IntoIter = Iter<'a>; - - fn into_fallible_iterator(self) -> Iter<'a> { - self.iter() - } -} - -/// A fallible iterator over pending notifications. 
-pub struct Iter<'a> { - conn: &'a Connection, -} - -impl<'a> FallibleIterator for Iter<'a> { - type Item = Notification; - type Error = Error; - - fn next(&mut self) -> Result> { - let mut conn = self.conn.0.borrow_mut(); - - if let Some(notification) = conn.notifications.pop_front() { - return Ok(Some(notification)); - } - - if conn.is_desynchronized() { - return Err(desynchronized().into()); - } - - match conn.read_message_with_notification_nonblocking() { - Ok(Some(backend::Message::NotificationResponse(body))) => Ok(Some(Notification { - process_id: body.process_id(), - channel: body.channel()?.to_owned(), - payload: body.message()?.to_owned(), - })), - Ok(Some(backend::Message::ErrorResponse(body))) => Err(err(&mut body.fields())), - Ok(None) => Ok(None), - Err(err) => Err(err.into()), - _ => unreachable!(), - } - } - - fn size_hint(&self) -> (usize, Option) { - (self.conn.0.borrow().notifications.len(), None) - } -} - -/// An iterator over notifications which will block if none are pending. -pub struct BlockingIter<'a> { - conn: &'a Connection, -} - -impl<'a> FallibleIterator for BlockingIter<'a> { - type Item = Notification; - type Error = Error; - - fn next(&mut self) -> Result> { - let mut conn = self.conn.0.borrow_mut(); - - if let Some(notification) = conn.notifications.pop_front() { - return Ok(Some(notification)); - } - - if conn.is_desynchronized() { - return Err(desynchronized().into()); - } - - match conn.read_message_with_notification() { - Ok(backend::Message::NotificationResponse(body)) => Ok(Some(Notification { - process_id: body.process_id(), - channel: body.channel()?.to_owned(), - payload: body.message()?.to_owned(), - })), - Ok(backend::Message::ErrorResponse(body)) => Err(err(&mut body.fields())), - Err(err) => Err(err.into()), - _ => unreachable!(), - } - } -} - -/// An iterator over notifications which will block for a period of time if -/// none are pending. -pub struct TimeoutIter<'a> { - conn: &'a Connection, - timeout: Duration, -} - -impl<'a> FallibleIterator for TimeoutIter<'a> { - type Item = Notification; - type Error = Error; - - fn next(&mut self) -> Result> { - let mut conn = self.conn.0.borrow_mut(); - - if let Some(notification) = conn.notifications.pop_front() { - return Ok(Some(notification)); - } - - if conn.is_desynchronized() { - return Err(desynchronized().into()); - } - - match conn.read_message_with_notification_timeout(self.timeout) { - Ok(Some(backend::Message::NotificationResponse(body))) => Ok(Some(Notification { - process_id: body.process_id(), - channel: body.channel()?.to_owned(), - payload: body.message()?.to_owned(), - })), - Ok(Some(backend::Message::ErrorResponse(body))) => Err(err(&mut body.fields())), - Ok(None) => Ok(None), - Err(err) => Err(err.into()), - _ => unreachable!(), - } - } - - fn size_hint(&self) -> (usize, Option) { - (self.conn.0.borrow().notifications.len(), None) - } -} - -fn err(fields: &mut ErrorFields) -> Error { - match DbError::new(fields) { - Ok(err) => postgres_shared::error::db(err), - Err(err) => err.into(), - } -} diff --git a/postgres/src/params.rs b/postgres/src/params.rs deleted file mode 100644 index 29e512286..000000000 --- a/postgres/src/params.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! 
Connection parameters - -pub use postgres_shared::params::{Builder, ConnectParams, Host, IntoConnectParams, User}; diff --git a/postgres/src/priv_io.rs b/postgres/src/priv_io.rs deleted file mode 100644 index c0e3a1318..000000000 --- a/postgres/src/priv_io.rs +++ /dev/null @@ -1,259 +0,0 @@ -use bytes::{BufMut, BytesMut}; -use postgres_protocol::message::backend; -use postgres_protocol::message::frontend; -use socket2::{Domain, SockAddr, Socket, Type}; -use std::io::{self, BufWriter, Read, Write}; -use std::net::{SocketAddr, ToSocketAddrs}; -#[cfg(unix)] -use std::os::unix::io::{AsRawFd, RawFd}; -#[cfg(windows)] -use std::os::windows::io::{AsRawSocket, RawSocket}; -use std::result; -use std::time::Duration; - -use error; -use params::{ConnectParams, Host}; -use tls::TlsStream; -use {Result, TlsMode}; - -const INITIAL_CAPACITY: usize = 8 * 1024; - -pub struct MessageStream { - stream: BufWriter>, - in_buf: BytesMut, - out_buf: Vec, -} - -impl MessageStream { - pub fn new(stream: Box) -> MessageStream { - MessageStream { - stream: BufWriter::new(stream), - in_buf: BytesMut::with_capacity(INITIAL_CAPACITY), - out_buf: vec![], - } - } - - pub fn get_ref(&self) -> &TlsStream { - &**self.stream.get_ref() - } - - pub fn write_message(&mut self, f: F) -> result::Result<(), E> - where - F: FnOnce(&mut Vec) -> result::Result<(), E>, - E: From, - { - self.out_buf.clear(); - f(&mut self.out_buf)?; - self.stream.write_all(&self.out_buf).map_err(From::from) - } - - pub fn read_message(&mut self) -> io::Result { - loop { - match backend::Message::parse(&mut self.in_buf) { - Ok(Some(message)) => return Ok(message), - Ok(None) => self.read_in()?, - Err(e) => return Err(e), - } - } - } - - fn read_in(&mut self) -> io::Result<()> { - self.in_buf.reserve(1); - match self - .stream - .get_mut() - .read(unsafe { self.in_buf.bytes_mut() }) - { - Ok(0) => Err(io::Error::new( - io::ErrorKind::UnexpectedEof, - "unexpected EOF", - )), - Ok(n) => { - unsafe { self.in_buf.advance_mut(n) }; - Ok(()) - } - Err(e) => Err(e), - } - } - - pub fn read_message_timeout( - &mut self, - timeout: Duration, - ) -> io::Result> { - if self.in_buf.is_empty() { - self.set_read_timeout(Some(timeout))?; - let r = self.read_in(); - self.set_read_timeout(None)?; - - match r { - Ok(()) => {} - Err(ref e) - if e.kind() == io::ErrorKind::WouldBlock - || e.kind() == io::ErrorKind::TimedOut => - { - return Ok(None) - } - Err(e) => return Err(e), - } - } - - self.read_message().map(Some) - } - - pub fn read_message_nonblocking(&mut self) -> io::Result> { - if self.in_buf.is_empty() { - self.set_nonblocking(true)?; - let r = self.read_in(); - self.set_nonblocking(false)?; - - match r { - Ok(()) => {} - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(None), - Err(e) => return Err(e), - } - } - - self.read_message().map(Some) - } - - pub fn flush(&mut self) -> io::Result<()> { - self.stream.flush() - } - - fn set_read_timeout(&self, timeout: Option) -> io::Result<()> { - self.stream.get_ref().get_ref().0.set_read_timeout(timeout) - } - - fn set_nonblocking(&self, nonblock: bool) -> io::Result<()> { - self.stream.get_ref().get_ref().0.set_nonblocking(nonblock) - } -} - -/// A connection to the Postgres server. -/// -/// It implements `Read`, `Write` and `TlsStream`, as well as `AsRawFd` on -/// Unix platforms and `AsRawSocket` on Windows platforms. 
-#[derive(Debug)] -pub struct Stream(Socket); - -impl Read for Stream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.0.read(buf) - } -} - -impl Write for Stream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.0.flush() - } -} - -impl TlsStream for Stream { - fn get_ref(&self) -> &Stream { - self - } - - fn get_mut(&mut self) -> &mut Stream { - self - } -} - -#[cfg(unix)] -impl AsRawFd for Stream { - fn as_raw_fd(&self) -> RawFd { - self.0.as_raw_fd() - } -} - -#[cfg(windows)] -impl AsRawSocket for Stream { - fn as_raw_socket(&self) -> RawSocket { - self.0.as_raw_socket() - } -} - -fn open_socket(params: &ConnectParams) -> Result { - let port = params.port(); - match *params.host() { - Host::Tcp(ref host) => { - let mut error = None; - for addr in (&**host, port).to_socket_addrs()? { - let domain = match addr { - SocketAddr::V4(_) => Domain::ipv4(), - SocketAddr::V6(_) => Domain::ipv6(), - }; - let socket = Socket::new(domain, Type::stream(), None)?; - if let Some(keepalive) = params.keepalive() { - socket.set_keepalive(Some(keepalive))?; - } - let addr = SockAddr::from(addr); - let r = match params.connect_timeout() { - Some(timeout) => socket.connect_timeout(&addr, timeout), - None => socket.connect(&addr), - }; - match r { - Ok(()) => return Ok(socket), - Err(e) => error = Some(e), - } - } - - Err(error - .unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve any addresses", - ) - }).into()) - } - #[cfg(unix)] - Host::Unix(ref path) => { - let path = path.join(&format!(".s.PGSQL.{}", port)); - let socket = Socket::new(Domain::unix(), Type::stream(), None)?; - let addr = SockAddr::unix(path)?; - socket.connect(&addr)?; - Ok(socket) - } - #[cfg(not(unix))] - Host::Unix(..) => Err(io::Error::new( - io::ErrorKind::InvalidInput, - "unix sockets are not supported on this system", - ).into()), - } -} - -pub fn initialize_stream(params: &ConnectParams, tls: TlsMode) -> Result> { - let mut socket = Stream(open_socket(params)?); - - let (tls_required, handshaker) = match tls { - TlsMode::None => return Ok(Box::new(socket)), - TlsMode::Prefer(handshaker) => (false, handshaker), - TlsMode::Require(handshaker) => (true, handshaker), - }; - - let mut buf = vec![]; - frontend::ssl_request(&mut buf); - socket.write_all(&buf)?; - socket.flush()?; - - let mut b = [0; 1]; - socket.read_exact(&mut b)?; - if b[0] == b'N' { - if tls_required { - return Err(error::tls("the server does not support TLS".into())); - } else { - return Ok(Box::new(socket)); - } - } - - let host = match *params.host() { - Host::Tcp(ref host) => host, - // Postgres doesn't support TLS over unix sockets - Host::Unix(_) => return Err(::bad_response().into()), - }; - - handshaker.tls_handshake(host, socket).map_err(error::tls) -} diff --git a/postgres/src/rows.rs b/postgres/src/rows.rs deleted file mode 100644 index 25b006fd8..000000000 --- a/postgres/src/rows.rs +++ /dev/null @@ -1,342 +0,0 @@ -//! Query result rows. 
- -use fallible_iterator::FallibleIterator; -use postgres_protocol::message::frontend; -use postgres_shared::rows::RowData; -use std::collections::VecDeque; -use std::fmt; -use std::io; -use std::ops::Deref; -use std::slice; -use std::sync::Arc; - -#[doc(inline)] -pub use postgres_shared::rows::RowIndex; - -use error; -use stmt::{Column, Statement}; -use transaction::Transaction; -use types::{FromSql, WrongType}; -use {Error, Result, StatementInfo}; - -enum MaybeOwned<'a, T: 'a> { - Borrowed(&'a T), - Owned(T), -} - -impl<'a, T> Deref for MaybeOwned<'a, T> { - type Target = T; - - fn deref(&self) -> &T { - match *self { - MaybeOwned::Borrowed(s) => s, - MaybeOwned::Owned(ref s) => s, - } - } -} - -/// The resulting rows of a query. -pub struct Rows { - stmt_info: Arc, - data: Vec, -} - -impl fmt::Debug for Rows { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Rows") - .field("columns", &self.columns()) - .field("rows", &self.data.len()) - .finish() - } -} - -impl Rows { - pub(crate) fn new(stmt: &Statement, data: Vec) -> Rows { - Rows { - stmt_info: stmt.info().clone(), - data: data, - } - } - - /// Returns a slice describing the columns of the `Rows`. - pub fn columns(&self) -> &[Column] { - &self.stmt_info.columns[..] - } - - /// Returns the number of rows present. - pub fn len(&self) -> usize { - self.data.len() - } - - /// Determines if there are any rows present. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns a specific `Row`. - /// - /// # Panics - /// - /// Panics if `idx` is out of bounds. - pub fn get<'a>(&'a self, idx: usize) -> Row<'a> { - Row { - stmt_info: &self.stmt_info, - data: MaybeOwned::Borrowed(&self.data[idx]), - } - } - - /// Returns an iterator over the `Row`s. - pub fn iter<'a>(&'a self) -> Iter<'a> { - Iter { - stmt_info: &self.stmt_info, - iter: self.data.iter(), - } - } -} - -impl<'a> IntoIterator for &'a Rows { - type Item = Row<'a>; - type IntoIter = Iter<'a>; - - fn into_iter(self) -> Iter<'a> { - self.iter() - } -} - -/// An iterator over `Row`s. -pub struct Iter<'a> { - stmt_info: &'a StatementInfo, - iter: slice::Iter<'a, RowData>, -} - -impl<'a> Iterator for Iter<'a> { - type Item = Row<'a>; - - fn next(&mut self) -> Option> { - self.iter.next().map(|row| Row { - stmt_info: self.stmt_info, - data: MaybeOwned::Borrowed(row), - }) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a> DoubleEndedIterator for Iter<'a> { - fn next_back(&mut self) -> Option> { - self.iter.next_back().map(|row| Row { - stmt_info: self.stmt_info, - data: MaybeOwned::Borrowed(row), - }) - } -} - -impl<'a> ExactSizeIterator for Iter<'a> {} - -/// A single result row of a query. -pub struct Row<'a> { - stmt_info: &'a StatementInfo, - data: MaybeOwned<'a, RowData>, -} - -impl<'a> fmt::Debug for Row<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Row") - .field("statement", self.stmt_info) - .finish() - } -} - -impl<'a> Row<'a> { - /// Returns the number of values in the row. - pub fn len(&self) -> usize { - self.data.len() - } - - /// Determines if there are any values in the row. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns a slice describing the columns of the `Row`. - pub fn columns(&self) -> &[Column] { - &self.stmt_info.columns[..] - } - - /// Retrieves the contents of a field of the row. - /// - /// A field can be accessed by the name or index of its column, though - /// access by index is more efficient. 
Rows are 0-indexed. - /// - /// # Panics - /// - /// Panics if the index does not reference a column or the return type is - /// not compatible with the Postgres type. - /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// let stmt = conn.prepare("SELECT foo, bar from BAZ").unwrap(); - /// for row in &stmt.query(&[]).unwrap() { - /// let foo: i32 = row.get(0); - /// let bar: String = row.get("bar"); - /// println!("{}: {}", foo, bar); - /// } - /// ``` - pub fn get<'b, I, T>(&'b self, idx: I) -> T - where - I: RowIndex + fmt::Debug, - T: FromSql<'b>, - { - match self.get_inner(&idx) { - Some(Ok(ok)) => ok, - Some(Err(err)) => panic!("error retrieving column {:?}: {:?}", idx, err), - None => panic!("no such column {:?}", idx), - } - } - - /// Retrieves the contents of a field of the row. - /// - /// A field can be accessed by the name or index of its column, though - /// access by index is more efficient. Rows are 0-indexed. - /// - /// Returns `None` if the index does not reference a column, `Some(Err(..))` - /// if there was an error converting the result value, and `Some(Ok(..))` - /// on success. - pub fn get_opt<'b, I, T>(&'b self, idx: I) -> Option> - where - I: RowIndex, - T: FromSql<'b>, - { - self.get_inner(&idx) - } - - fn get_inner<'b, I, T>(&'b self, idx: &I) -> Option> - where - I: RowIndex, - T: FromSql<'b>, - { - let idx = match idx.__idx(&self.stmt_info.columns) { - Some(idx) => idx, - None => return None, - }; - - let ty = self.stmt_info.columns[idx].type_(); - if !::accepts(ty) { - return Some(Err(error::conversion(Box::new(WrongType::new(ty.clone()))))); - } - let value = FromSql::from_sql_nullable(ty, self.data.get(idx)); - Some(value.map_err(error::conversion)) - } -} - -/// A lazily-loaded iterator over the resulting rows of a query. 
-pub struct LazyRows<'trans, 'stmt> { - stmt: &'stmt Statement<'stmt>, - data: VecDeque, - name: String, - row_limit: i32, - more_rows: bool, - finished: bool, - _trans: &'trans Transaction<'trans>, -} - -impl<'a, 'b> Drop for LazyRows<'a, 'b> { - fn drop(&mut self) { - if !self.finished { - let _ = self.finish_inner(); - } - } -} - -impl<'a, 'b> fmt::Debug for LazyRows<'a, 'b> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("LazyRows") - .field("name", &self.name) - .field("row_limit", &self.row_limit) - .field("remaining_rows", &self.data.len()) - .field("more_rows", &self.more_rows) - .finish() - } -} - -impl<'trans, 'stmt> LazyRows<'trans, 'stmt> { - pub(crate) fn new( - stmt: &'stmt Statement<'stmt>, - data: VecDeque, - name: String, - row_limit: i32, - more_rows: bool, - finished: bool, - trans: &'trans Transaction<'trans>, - ) -> LazyRows<'trans, 'stmt> { - LazyRows { - stmt: stmt, - data: data, - name: name, - row_limit: row_limit, - more_rows: more_rows, - finished: finished, - _trans: trans, - } - } - - fn finish_inner(&mut self) -> Result<()> { - let mut conn = self.stmt.conn().0.borrow_mut(); - check_desync!(conn); - conn.close_statement(&self.name, b'P') - } - - fn execute(&mut self) -> Result<()> { - let mut conn = self.stmt.conn().0.borrow_mut(); - - conn.stream - .write_message(|buf| frontend::execute(&self.name, self.row_limit, buf))?; - conn.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - conn.stream.flush()?; - conn.read_rows(|row| self.data.push_back(row)) - .map(|more_rows| self.more_rows = more_rows) - } - - /// Returns a slice describing the columns of the `LazyRows`. - pub fn columns(&self) -> &[Column] { - self.stmt.columns() - } - - /// Consumes the `LazyRows`, cleaning up associated state. - /// - /// Functionally identical to the `Drop` implementation on `LazyRows` - /// except that it returns any error to the caller. - pub fn finish(mut self) -> Result<()> { - self.finish_inner() - } -} - -impl<'trans, 'stmt> FallibleIterator for LazyRows<'trans, 'stmt> { - type Item = Row<'stmt>; - type Error = Error; - - fn next(&mut self) -> Result>> { - if self.data.is_empty() && self.more_rows { - self.execute()?; - } - - let row = self.data.pop_front().map(|r| Row { - stmt_info: &**self.stmt.info(), - data: MaybeOwned::Owned(r), - }); - - Ok(row) - } - - fn size_hint(&self) -> (usize, Option) { - let lower = self.data.len(); - let upper = if self.more_rows { None } else { Some(lower) }; - (lower, upper) - } -} diff --git a/postgres/src/stmt.rs b/postgres/src/stmt.rs deleted file mode 100644 index 640cbcb28..000000000 --- a/postgres/src/stmt.rs +++ /dev/null @@ -1,605 +0,0 @@ -//! Prepared statements - -use fallible_iterator::FallibleIterator; -use postgres_protocol::message::{backend, frontend}; -use postgres_shared::rows::RowData; -use std::cell::Cell; -use std::collections::VecDeque; -use std::fmt; -use std::io::{self, Read, Write}; -use std::sync::Arc; - -#[doc(inline)] -pub use postgres_shared::stmt::Column; - -use rows::{LazyRows, Rows}; -use transaction::Transaction; -use types::{ToSql, Type}; -use {bad_response, err, Connection, Result, StatementInfo}; - -/// A prepared statement. 
-pub struct Statement<'conn> { - conn: &'conn Connection, - info: Arc, - next_portal_id: Cell, - finished: bool, -} - -impl<'a> fmt::Debug for Statement<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&*self.info, fmt) - } -} - -impl<'conn> Drop for Statement<'conn> { - fn drop(&mut self) { - let _ = self.finish_inner(); - } -} - -impl<'conn> Statement<'conn> { - pub(crate) fn new( - conn: &'conn Connection, - info: Arc, - next_portal_id: Cell, - finished: bool, - ) -> Statement<'conn> { - Statement { - conn: conn, - info: info, - next_portal_id: next_portal_id, - finished: finished, - } - } - - pub(crate) fn info(&self) -> &Arc { - &self.info - } - - pub(crate) fn conn(&self) -> &'conn Connection { - self.conn - } - - pub(crate) fn into_query(self, params: &[&ToSql]) -> Result { - check_desync!(self.conn); - let mut rows = vec![]; - self.inner_query("", 0, params, |row| rows.push(row))?; - Ok(Rows::new(&self, rows)) - } - - fn finish_inner(&mut self) -> Result<()> { - if self.finished { - Ok(()) - } else { - self.finished = true; - let mut conn = self.conn.0.borrow_mut(); - check_desync!(conn); - conn.close_statement(&self.info.name, b'S') - } - } - - #[allow(type_complexity)] - fn inner_query( - &self, - portal_name: &str, - row_limit: i32, - params: &[&ToSql], - acceptor: F, - ) -> Result - where - F: FnMut(RowData), - { - let mut conn = self.conn.0.borrow_mut(); - - conn.raw_execute( - &self.info.name, - portal_name, - row_limit, - self.param_types(), - params, - )?; - - conn.read_rows(acceptor) - } - - /// Returns a slice containing the expected parameter types. - pub fn param_types(&self) -> &[Type] { - &self.info.param_types - } - - /// Returns a slice describing the columns of the result of the query. - pub fn columns(&self) -> &[Column] { - &self.info.columns - } - - /// Executes the prepared statement, returning the number of rows modified. - /// - /// If the statement does not modify any rows (e.g. SELECT), 0 is returned. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number - /// expected. - /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// # let bar = 1i32; - /// # let baz = true; - /// let stmt = conn.prepare("UPDATE foo SET bar = $1 WHERE baz = $2").unwrap(); - /// let rows_updated = stmt.execute(&[&bar, &baz]).unwrap(); - /// println!("{} rows updated", rows_updated); - /// ``` - pub fn execute(&self, params: &[&ToSql]) -> Result { - let mut conn = self.conn.0.borrow_mut(); - check_desync!(conn); - conn.raw_execute(&self.info.name, "", 0, self.param_types(), params)?; - - let num; - loop { - match conn.read_message()? { - backend::Message::DataRow(_) => {} - backend::Message::ErrorResponse(body) => { - conn.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - backend::Message::CommandComplete(body) => { - num = parse_update_count(body.tag()?); - break; - } - backend::Message::EmptyQueryResponse => { - num = 0; - break; - } - backend::Message::CopyInResponse(_) => { - conn.stream.write_message(|buf| { - frontend::copy_fail("COPY queries cannot be directly executed", buf) - })?; - conn.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - conn.stream.flush()?; - } - backend::Message::CopyOutResponse(_) => { - loop { - match conn.read_message()? 
{ - backend::Message::CopyDone => break, - backend::Message::ErrorResponse(body) => { - conn.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - _ => {} - } - } - num = 0; - break; - } - _ => { - conn.desynchronized = true; - return Err(bad_response().into()); - } - } - } - conn.wait_for_ready()?; - - Ok(num) - } - - /// Executes the prepared statement, returning the resulting rows. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number - /// expected. - /// - /// # Example - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// let stmt = conn.prepare("SELECT foo FROM bar WHERE baz = $1").unwrap(); - /// # let baz = true; - /// for row in &stmt.query(&[&baz]).unwrap() { - /// let foo: i32 = row.get("foo"); - /// println!("foo: {}", foo); - /// } - /// ``` - pub fn query(&self, params: &[&ToSql]) -> Result { - check_desync!(self.conn); - let mut rows = vec![]; - self.inner_query("", 0, params, |row| rows.push(row))?; - Ok(Rows::new(self, rows)) - } - - /// Executes the prepared statement, returning a lazily loaded iterator - /// over the resulting rows. - /// - /// No more than `row_limit` rows will be stored in memory at a time. Rows - /// will be pulled from the database in batches of `row_limit` as needed. - /// If `row_limit` is less than or equal to 0, `lazy_query` is equivalent - /// to `query`. - /// - /// This can only be called inside of a transaction, and the `Transaction` - /// object representing the active transaction must be passed to - /// `lazy_query`. - /// - /// # Panics - /// - /// Panics if the provided `Transaction` is not associated with the same - /// `Connection` as this `Statement`, if the `Transaction` is not - /// active, or if the number of parameters provided does not match the - /// number of parameters expected. 
- /// - /// # Examples - /// - /// ```no_run - /// extern crate fallible_iterator; - /// extern crate postgres; - /// - /// use fallible_iterator::FallibleIterator; - /// # use postgres::{Connection, TlsMode}; - /// - /// # fn main() { - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// let stmt = conn.prepare("SELECT foo FROM bar WHERE baz = $1").unwrap(); - /// let trans = conn.transaction().unwrap(); - /// # let baz = true; - /// let mut rows = stmt.lazy_query(&trans, &[&baz], 100).unwrap(); - /// - /// while let Some(row) = rows.next().unwrap() { - /// let foo: i32 = row.get("foo"); - /// println!("foo: {}", foo); - /// } - /// # } - /// ``` - pub fn lazy_query<'trans, 'stmt>( - &'stmt self, - trans: &'trans Transaction, - params: &[&ToSql], - row_limit: i32, - ) -> Result> { - assert!( - self.conn as *const _ == trans.conn() as *const _, - "the `Transaction` passed to `lazy_query` must be associated with the same \ - `Connection` as the `Statement`" - ); - let conn = self.conn.0.borrow(); - check_desync!(conn); - assert!( - conn.trans_depth == trans.depth(), - "`lazy_query` must be passed the active transaction" - ); - drop(conn); - - let id = self.next_portal_id.get(); - self.next_portal_id.set(id + 1); - let portal_name = format!("{}p{}", self.info.name, id); - - let mut rows = VecDeque::new(); - let more_rows = - self.inner_query(&portal_name, row_limit, params, |row| rows.push_back(row))?; - Ok(LazyRows::new( - self, - rows, - portal_name, - row_limit, - more_rows, - false, - trans, - )) - } - - /// Executes a `COPY FROM STDIN` statement, returning the number of rows - /// added. - /// - /// The contents of the provided reader are passed to the Postgres server - /// verbatim; it is the caller's responsibility to ensure it uses the - /// proper format. See the - /// [Postgres documentation](http://www.postgresql.org/docs/9.4/static/sql-copy.html) - /// for details. - /// - /// If the statement is not a `COPY FROM STDIN` statement it will still be - /// executed and this method will return an error. - /// - /// # Examples - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// conn.batch_execute("CREATE TABLE people (id INT PRIMARY KEY, name VARCHAR)").unwrap(); - /// let stmt = conn.prepare("COPY people FROM STDIN").unwrap(); - /// stmt.copy_in(&[], &mut "1\tjohn\n2\tjane\n".as_bytes()).unwrap(); - /// ``` - pub fn copy_in(&self, params: &[&ToSql], r: &mut R) -> Result { - let mut conn = self.conn.0.borrow_mut(); - conn.raw_execute(&self.info.name, "", 0, self.param_types(), params)?; - - let (format, column_formats) = match conn.read_message()? { - backend::Message::CopyInResponse(body) => { - let format = body.format(); - let column_formats = body - .column_formats() - .map(|f| Format::from_u16(f)) - .collect()?; - (format, column_formats) - } - backend::Message::ErrorResponse(body) => { - conn.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - _ => loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? 
{ - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "called `copy_in` on a non-`COPY FROM STDIN` statement", - ).into()); - } - }, - }; - - let info = CopyInfo { - format: Format::from_u16(format as u16), - column_formats: column_formats, - }; - - let mut buf = [0; 16 * 1024]; - loop { - match fill_copy_buf(&mut buf, r, &info) { - Ok(0) => break, - Ok(len) => { - conn.stream - .write_message(|out| frontend::copy_data(&buf[..len], out))?; - } - Err(err) => { - conn.stream - .write_message(|buf| frontend::copy_fail("", buf))?; - conn.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::copy_done(buf)))?; - conn.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - conn.stream.flush()?; - match conn.read_message()? { - backend::Message::ErrorResponse(_) => { - // expected from the CopyFail - } - _ => { - conn.desynchronized = true; - return Err(bad_response().into()); - } - } - conn.wait_for_ready()?; - return Err(err.into()); - } - } - } - - conn.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::copy_done(buf)))?; - conn.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - conn.stream.flush()?; - - let num = match conn.read_message()? { - backend::Message::CommandComplete(body) => parse_update_count(body.tag()?), - backend::Message::ErrorResponse(body) => { - conn.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - _ => { - conn.desynchronized = true; - return Err(bad_response().into()); - } - }; - - conn.wait_for_ready()?; - Ok(num) - } - - /// Executes a `COPY TO STDOUT` statement, passing the resulting data to - /// the provided writer and returning the number of rows received. - /// - /// See the [Postgres documentation](http://www.postgresql.org/docs/9.4/static/sql-copy.html) - /// for details on the data format. - /// - /// If the statement is not a `COPY TO STDOUT` statement it will still be - /// executed and this method will return an error. - /// - /// # Examples - /// - /// ```rust,no_run - /// # use postgres::{Connection, TlsMode}; - /// # let conn = Connection::connect("", TlsMode::None).unwrap(); - /// conn.batch_execute(" - /// CREATE TABLE people (id INT PRIMARY KEY, name VARCHAR); - /// INSERT INTO people (id, name) VALUES (1, 'john'), (2, 'jane');").unwrap(); - /// let stmt = conn.prepare("COPY people TO STDOUT").unwrap(); - /// let mut buf = vec![]; - /// stmt.copy_out(&[], &mut buf).unwrap(); - /// assert_eq!(buf, b"1\tjohn\n2\tjane\n"); - /// ``` - pub fn copy_out<'a, W: WriteWithInfo>(&'a self, params: &[&ToSql], w: &mut W) -> Result { - let mut conn = self.conn.0.borrow_mut(); - conn.raw_execute(&self.info.name, "", 0, self.param_types(), params)?; - - let (format, column_formats) = match conn.read_message()? { - backend::Message::CopyOutResponse(body) => { - let format = body.format(); - let column_formats = body - .column_formats() - .map(|f| Format::from_u16(f)) - .collect()?; - (format, column_formats) - } - backend::Message::CopyInResponse(_) => { - conn.stream - .write_message(|buf| frontend::copy_fail("", buf))?; - conn.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::copy_done(buf)))?; - conn.stream - .write_message(|buf| Ok::<(), io::Error>(frontend::sync(buf)))?; - conn.stream.flush()?; - match conn.read_message()? 
{ - backend::Message::ErrorResponse(_) => { - // expected from the CopyFail - } - _ => { - conn.desynchronized = true; - return Err(bad_response().into()); - } - } - conn.wait_for_ready()?; - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "called `copy_out` on a non-`COPY TO STDOUT` statement", - ).into()); - } - backend::Message::ErrorResponse(body) => { - conn.wait_for_ready()?; - return Err(err(&mut body.fields())); - } - _ => loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "called `copy_out` on a non-`COPY TO STDOUT` statement", - ).into()); - } - }, - }; - - let info = CopyInfo { - format: Format::from_u16(format as u16), - column_formats: column_formats, - }; - - let count; - loop { - match conn.read_message()? { - backend::Message::CopyData(body) => { - let mut data = body.data(); - while !data.is_empty() { - match w.write_with_info(data, &info) { - Ok(n) => data = &data[n..], - Err(e) => loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? { - return Err(e.into()); - } - }, - } - } - } - backend::Message::CopyDone => {} - backend::Message::CommandComplete(body) => { - count = parse_update_count(body.tag()?); - break; - } - backend::Message::ErrorResponse(body) => loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? { - return Err(err(&mut body.fields())); - } - }, - _ => loop { - if let backend::Message::ReadyForQuery(_) = conn.read_message()? { - return Err(bad_response().into()); - } - }, - } - } - - conn.wait_for_ready()?; - Ok(count) - } - - /// Consumes the statement, clearing it from the Postgres session. - /// - /// If this statement was created via the `prepare_cached` method, `finish` - /// does nothing. - /// - /// Functionally identical to the `Drop` implementation of the - /// `Statement` except that it returns any error to the caller. - pub fn finish(mut self) -> Result<()> { - self.finish_inner() - } -} - -fn fill_copy_buf(buf: &mut [u8], r: &mut R, info: &CopyInfo) -> io::Result { - let mut nread = 0; - while nread < buf.len() { - match r.read_with_info(&mut buf[nread..], info) { - Ok(0) => break, - Ok(n) => nread += n, - Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} - Err(e) => return Err(e), - } - } - Ok(nread) -} - -/// A struct containing information relevant for a `COPY` operation. -pub struct CopyInfo { - format: Format, - column_formats: Vec, -} - -impl CopyInfo { - /// Returns the format of the overall data. - pub fn format(&self) -> Format { - self.format - } - - /// Returns the format of the individual columns. - pub fn column_formats(&self) -> &[Format] { - &self.column_formats - } -} - -/// Like `Read` except that a `CopyInfo` object is provided as well. -/// -/// All types that implement `Read` also implement this trait. -pub trait ReadWithInfo { - /// Like `Read::read`. - fn read_with_info(&mut self, buf: &mut [u8], info: &CopyInfo) -> io::Result; -} - -impl ReadWithInfo for R { - fn read_with_info(&mut self, buf: &mut [u8], _: &CopyInfo) -> io::Result { - self.read(buf) - } -} - -/// Like `Write` except that a `CopyInfo` object is provided as well. -/// -/// All types that implement `Write` also implement this trait. -pub trait WriteWithInfo { - /// Like `Write::write`. 
- fn write_with_info(&mut self, buf: &[u8], info: &CopyInfo) -> io::Result; -} - -impl WriteWithInfo for W { - fn write_with_info(&mut self, buf: &[u8], _: &CopyInfo) -> io::Result { - self.write(buf) - } -} - -/// The format of a portion of COPY query data. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum Format { - /// A text based format. - Text, - /// A binary format. - Binary, -} - -impl Format { - fn from_u16(value: u16) -> Format { - match value { - 0 => Format::Text, - _ => Format::Binary, - } - } -} - -fn parse_update_count(tag: &str) -> u64 { - tag.split(' ').last().unwrap().parse().unwrap_or(0) -} diff --git a/postgres/src/text_rows.rs b/postgres/src/text_rows.rs deleted file mode 100644 index 678e98a57..000000000 --- a/postgres/src/text_rows.rs +++ /dev/null @@ -1,191 +0,0 @@ -//! Query result rows. - -use postgres_shared::rows::RowData; -use std::fmt; -use std::slice; -use std::str; - -#[doc(inline)] -pub use postgres_shared::rows::RowIndex; - -use stmt::Column; -use {error, Result}; - -/// The resulting rows of a query. -pub struct TextRows { - columns: Vec, - data: Vec, -} - -impl fmt::Debug for TextRows { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("TextRows") - .field("columns", &self.columns()) - .field("rows", &self.data.len()) - .finish() - } -} - -impl TextRows { - pub(crate) fn new(columns: Vec, data: Vec) -> TextRows { - TextRows { - columns: columns, - data: data, - } - } - - /// Returns a slice describing the columns of the `TextRows`. - pub fn columns(&self) -> &[Column] { - &self.columns[..] - } - - /// Returns the number of rows present. - pub fn len(&self) -> usize { - self.data.len() - } - - /// Determines if there are any rows present. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns a specific `TextRow`. - /// - /// # Panics - /// - /// Panics if `idx` is out of bounds. - pub fn get<'a>(&'a self, idx: usize) -> TextRow<'a> { - TextRow { - columns: &self.columns, - data: &self.data[idx], - } - } - - /// Returns an iterator over the `TextRow`s. - pub fn iter<'a>(&'a self) -> Iter<'a> { - Iter { - columns: self.columns(), - iter: self.data.iter(), - } - } -} - -impl<'a> IntoIterator for &'a TextRows { - type Item = TextRow<'a>; - type IntoIter = Iter<'a>; - - fn into_iter(self) -> Iter<'a> { - self.iter() - } -} - -/// An iterator over `TextRow`s. -pub struct Iter<'a> { - columns: &'a [Column], - iter: slice::Iter<'a, RowData>, -} - -impl<'a> Iterator for Iter<'a> { - type Item = TextRow<'a>; - - fn next(&mut self) -> Option> { - self.iter.next().map(|row| TextRow { - columns: self.columns, - data: row, - }) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a> DoubleEndedIterator for Iter<'a> { - fn next_back(&mut self) -> Option> { - self.iter.next_back().map(|row| TextRow { - columns: self.columns, - data: row, - }) - } -} - -impl<'a> ExactSizeIterator for Iter<'a> {} - -/// A single result row of a query. -pub struct TextRow<'a> { - columns: &'a [Column], - data: &'a RowData, -} - -impl<'a> fmt::Debug for TextRow<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("TextRow") - .field("columns", &self.columns) - .finish() - } -} - -impl<'a> TextRow<'a> { - /// Returns the number of values in the row. - pub fn len(&self) -> usize { - self.data.len() - } - - /// Determines if there are any values in the row. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns a slice describing the columns of the `TextRow`. 
- pub fn columns(&self) -> &[Column] { - self.columns - } - - /// Retrieve the contents of a field of a row - /// - /// A field can be accessed by the name or index of its column, though - /// access by index is more efficient. Rows are 0-indexed. - /// - /// # Panics - /// - /// Panics if the index does not reference a column - pub fn get(&self, idx: I) -> &str - where - I: RowIndex + fmt::Debug, - { - match self.get_inner(&idx) { - Some(Ok(value)) => value, - Some(Err(err)) => panic!("error retrieving column {:?}: {:?}", idx, err), - None => panic!("no such column {:?}", idx), - } - } - - /// Retrieves the contents of a field of the row. - /// - /// A field can be accessed by the name or index of its column, though - /// access by index is more efficient. Rows are 0-indexed. - /// - /// Returns None if the index does not reference a column, Some(Err(..)) if - /// there was an error parsing the result as UTF-8, and Some(Ok(..)) on - /// success. - pub fn get_opt(&self, idx: I) -> Option> - where - I: RowIndex, - { - self.get_inner(&idx) - } - - fn get_inner(&self, idx: &I) -> Option> - where - I: RowIndex, - { - let idx = match idx.__idx(self.columns) { - Some(idx) => idx, - None => return None, - }; - - self.data - .get(idx) - .map(|s| str::from_utf8(s).map_err(|e| error::conversion(Box::new(e)))) - } -} diff --git a/postgres/src/tls.rs b/postgres/src/tls.rs deleted file mode 100644 index 8d425285a..000000000 --- a/postgres/src/tls.rs +++ /dev/null @@ -1,50 +0,0 @@ -//! Types and traits for TLS support. -pub use priv_io::Stream; - -use std::error::Error; -use std::fmt; -use std::io::prelude::*; - -/// A trait implemented by TLS streams. -pub trait TlsStream: fmt::Debug + Read + Write + Send { - /// Returns a reference to the underlying `Stream`. - fn get_ref(&self) -> &Stream; - - /// Returns a mutable reference to the underlying `Stream`. - fn get_mut(&mut self) -> &mut Stream; - - /// Returns the data associated with the `tls-server-end-point` channel binding type as - /// described in [RFC 5929], if supported. - /// - /// An implementation only needs to support one of this or `tls_unique`. - /// - /// [RFC 5929]: https://tools.ietf.org/html/rfc5929 - fn tls_server_end_point(&self) -> Option> { - None - } -} - -/// A trait implemented by types that can initiate a TLS session over a Postgres -/// stream. -pub trait TlsHandshake: fmt::Debug { - /// Performs a client-side TLS handshake, returning a wrapper around the - /// provided stream. - /// - /// The host portion of the connection parameters is provided for hostname - /// verification. - fn tls_handshake( - &self, - host: &str, - stream: Stream, - ) -> Result, Box>; -} - -impl TlsHandshake for Box { - fn tls_handshake( - &self, - host: &str, - stream: Stream, - ) -> Result, Box> { - (**self).tls_handshake(host, stream) - } -} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs deleted file mode 100644 index 3769c1a84..000000000 --- a/postgres/src/transaction.rs +++ /dev/null @@ -1,327 +0,0 @@ -//! Transactions - -use std::cell::Cell; -use std::fmt; - -use rows::Rows; -use stmt::Statement; -use text_rows::TextRows; -use types::ToSql; -use {bad_response, Connection, Result}; - -/// An enumeration of transaction isolation levels. -/// -/// See the [Postgres documentation](http://www.postgresql.org/docs/9.4/static/transaction-iso.html) -/// for full details on the semantics of each level. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum IsolationLevel { - /// The "read uncommitted" level. 
- /// - /// In current versions of Postgres, this behaves identically to - /// `ReadCommitted`. - ReadUncommitted, - /// The "read committed" level. - /// - /// This is the default isolation level in Postgres. - ReadCommitted, - /// The "repeatable read" level. - RepeatableRead, - /// The "serializable" level. - Serializable, -} - -impl IsolationLevel { - pub(crate) fn new(raw: &str) -> Result { - if raw.eq_ignore_ascii_case("READ UNCOMMITTED") { - Ok(IsolationLevel::ReadUncommitted) - } else if raw.eq_ignore_ascii_case("READ COMMITTED") { - Ok(IsolationLevel::ReadCommitted) - } else if raw.eq_ignore_ascii_case("REPEATABLE READ") { - Ok(IsolationLevel::RepeatableRead) - } else if raw.eq_ignore_ascii_case("SERIALIZABLE") { - Ok(IsolationLevel::Serializable) - } else { - Err(bad_response().into()) - } - } - - fn to_sql(&self) -> &'static str { - match *self { - IsolationLevel::ReadUncommitted => "READ UNCOMMITTED", - IsolationLevel::ReadCommitted => "READ COMMITTED", - IsolationLevel::RepeatableRead => "REPEATABLE READ", - IsolationLevel::Serializable => "SERIALIZABLE", - } - } -} - -/// Configuration of a transaction. -#[derive(Debug)] -pub struct Config { - isolation_level: Option, - read_only: Option, - deferrable: Option, -} - -impl Default for Config { - fn default() -> Config { - Config { - isolation_level: None, - read_only: None, - deferrable: None, - } - } -} - -impl Config { - pub(crate) fn build_command(&self, s: &mut String) { - let mut first = true; - - if let Some(isolation_level) = self.isolation_level { - s.push_str(" ISOLATION LEVEL "); - s.push_str(isolation_level.to_sql()); - first = false; - } - - if let Some(read_only) = self.read_only { - if !first { - s.push(','); - } - if read_only { - s.push_str(" READ ONLY"); - } else { - s.push_str(" READ WRITE"); - } - first = false; - } - - if let Some(deferrable) = self.deferrable { - if !first { - s.push(','); - } - if deferrable { - s.push_str(" DEFERRABLE"); - } else { - s.push_str(" NOT DEFERRABLE"); - } - } - } - - /// Creates a new `Config` with no configuration overrides. - pub fn new() -> Config { - Config::default() - } - - /// Sets the isolation level of the configuration. - pub fn isolation_level(&mut self, isolation_level: IsolationLevel) -> &mut Config { - self.isolation_level = Some(isolation_level); - self - } - - /// Sets the read-only property of a transaction. - /// - /// If enabled, a transaction will be unable to modify any persistent - /// database state. - pub fn read_only(&mut self, read_only: bool) -> &mut Config { - self.read_only = Some(read_only); - self - } - - /// Sets the deferrable property of a transaction. - /// - /// If enabled in a read only, serializable transaction, the transaction may - /// block when created, after which it will run without the normal overhead - /// of a serializable transaction and will not be forced to roll back due - /// to serialization failures. - pub fn deferrable(&mut self, deferrable: bool) -> &mut Config { - self.deferrable = Some(deferrable); - self - } -} - -/// A transaction on a database connection. -/// -/// The transaction will roll back by default. 
-pub struct Transaction<'conn> { - conn: &'conn Connection, - depth: u32, - savepoint_name: Option, - commit: Cell, - finished: bool, -} - -impl<'a> fmt::Debug for Transaction<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Transaction") - .field("commit", &self.commit.get()) - .field("depth", &self.depth) - .finish() - } -} - -impl<'conn> Drop for Transaction<'conn> { - fn drop(&mut self) { - if !self.finished { - let _ = self.finish_inner(); - } - } -} - -impl<'conn> Transaction<'conn> { - pub(crate) fn new(conn: &'conn Connection, depth: u32) -> Transaction<'conn> { - Transaction { - conn: conn, - depth: depth, - savepoint_name: None, - commit: Cell::new(false), - finished: false, - } - } - - pub(crate) fn conn(&self) -> &'conn Connection { - self.conn - } - - pub(crate) fn depth(&self) -> u32 { - self.depth - } - - fn finish_inner(&mut self) -> Result<()> { - let mut conn = self.conn.0.borrow_mut(); - debug_assert!(self.depth == conn.trans_depth); - conn.trans_depth -= 1; - match (self.commit.get(), &self.savepoint_name) { - (false, &Some(ref sp)) => conn.quick_query(&format!("ROLLBACK TO {}", sp))?, - (false, &None) => conn.quick_query("ROLLBACK")?, - (true, &Some(ref sp)) => conn.quick_query(&format!("RELEASE {}", sp))?, - (true, &None) => conn.quick_query("COMMIT")?, - }; - - Ok(()) - } - - /// Like `Connection::prepare`. - pub fn prepare(&self, query: &str) -> Result> { - self.conn.prepare(query) - } - - /// Like `Connection::prepare_cached`. - /// - /// # Note - /// - /// The statement will be cached for the duration of the - /// connection, not just the duration of this transaction. - pub fn prepare_cached(&self, query: &str) -> Result> { - self.conn.prepare_cached(query) - } - - /// Like `Connection::execute`. - pub fn execute(&self, query: &str, params: &[&ToSql]) -> Result { - self.conn.execute(query, params) - } - - /// Like `Connection::query`. - pub fn query<'a>(&'a self, query: &str, params: &[&ToSql]) -> Result { - self.conn.query(query, params) - } - - /// Like `Connection::batch_execute`. - #[deprecated(since = "0.15.3", note = "please use `simple_query` instead")] - pub fn batch_execute(&self, query: &str) -> Result<()> { - self.simple_query(query).map(|_| ()) - } - - /// Like `Connection::simple_query`. - pub fn simple_query(&self, query: &str) -> Result> { - self.conn.simple_query(query) - } - - /// Like `Connection::transaction`, but creates a nested transaction via - /// a savepoint. - /// - /// # Panics - /// - /// Panics if there is an active nested transaction. - pub fn transaction<'a>(&'a self) -> Result> { - self.savepoint(format!("sp_{}", self.depth())) - } - - /// Like `Connection::transaction`, but creates a nested transaction via - /// a savepoint with the specified name. - /// - /// # Panics - /// - /// Panics if there is an active nested transaction. - #[inline] - pub fn savepoint<'a, I>(&'a self, name: I) -> Result> - where - I: Into, - { - self._savepoint(name.into()) - } - - fn _savepoint<'a>(&'a self, name: String) -> Result> { - let mut conn = self.conn.0.borrow_mut(); - check_desync!(conn); - assert!( - conn.trans_depth == self.depth, - "`savepoint` may only be called on the active transaction" - ); - conn.quick_query(&format!("SAVEPOINT {}", name))?; - conn.trans_depth += 1; - Ok(Transaction { - conn: self.conn, - depth: self.depth + 1, - savepoint_name: Some(name), - commit: Cell::new(false), - finished: false, - }) - } - - /// Returns a reference to the `Transaction`'s `Connection`. 
- pub fn connection(&self) -> &'conn Connection { - self.conn - } - - /// Like `Connection::is_active`. - pub fn is_active(&self) -> bool { - self.conn.0.borrow().trans_depth == self.depth - } - - /// Alters the configuration of the active transaction. - pub fn set_config(&self, config: &Config) -> Result<()> { - let mut command = "SET TRANSACTION".to_owned(); - config.build_command(&mut command); - self.simple_query(&command).map(|_| ()) - } - - /// Determines if the transaction is currently set to commit or roll back. - pub fn will_commit(&self) -> bool { - self.commit.get() - } - - /// Sets the transaction to commit at its completion. - pub fn set_commit(&self) { - self.commit.set(true); - } - - /// Sets the transaction to roll back at its completion. - pub fn set_rollback(&self) { - self.commit.set(false); - } - - /// A convenience method which consumes and commits a transaction. - pub fn commit(self) -> Result<()> { - self.set_commit(); - self.finish() - } - - /// Consumes the transaction, committing or rolling it back as appropriate. - /// - /// Functionally equivalent to the `Drop` implementation of `Transaction` - /// except that it returns any error to the caller. - pub fn finish(mut self) -> Result<()> { - self.finished = true; - self.finish_inner() - } -} diff --git a/postgres/tests/test.rs b/postgres/tests/test.rs deleted file mode 100644 index 3b8ee3844..000000000 --- a/postgres/tests/test.rs +++ /dev/null @@ -1,1481 +0,0 @@ -extern crate fallible_iterator; -extern crate postgres; -extern crate url; - -#[macro_use] -extern crate postgres_shared; - -use fallible_iterator::FallibleIterator; -use postgres::error::ErrorPosition::Normal; -use postgres::error::{DbError, SqlState}; -use postgres::notification::Notification; -use postgres::params::IntoConnectParams; -use postgres::transaction::{self, IsolationLevel}; -use postgres::types::{Kind, Oid, Type, WrongType}; -use postgres::{Connection, GenericConnection, HandleNotice, TlsMode}; -use std::io; -use std::thread; -use std::time::Duration; - -macro_rules! 
or_panic { - ($e:expr) => { - match $e { - Ok(ok) => ok, - Err(err) => panic!("{:#?}", err), - } - }; -} - -mod types; - -#[test] -fn test_non_default_database() { - or_panic!(Connection::connect( - "postgres://postgres@localhost:5433/postgres", - TlsMode::None, - )); -} - -#[test] -fn test_url_terminating_slash() { - or_panic!(Connection::connect( - "postgres://postgres@localhost:5433/", - TlsMode::None, - )); -} - -#[test] -fn test_prepare_err() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let err = conn.prepare("invalid sql database").unwrap_err(); - match err.as_db() { - Some(e) if e.code == SqlState::SYNTAX_ERROR && e.position == Some(Normal(1)) => {} - _ => panic!("Unexpected result {:?}", err), - } -} - -#[test] -fn test_unknown_database() { - match Connection::connect("postgres://postgres@localhost:5433/asdf", TlsMode::None) { - Err(ref e) if e.code() == Some(&SqlState::INVALID_CATALOG_NAME) => {} - Err(resp) => panic!("Unexpected result {:?}", resp), - _ => panic!("Unexpected result"), - } -} - -#[test] -fn test_connection_finish() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - assert!(conn.finish().is_ok()); -} - -#[test] -#[ignore] // doesn't work on our CI setup -fn test_unix_connection() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SHOW unix_socket_directories")); - let result = or_panic!(stmt.query(&[])); - let unix_socket_directories: String = result.iter().map(|row| row.get(0)).next().unwrap(); - - if unix_socket_directories.is_empty() { - panic!("can't test connect_unix; unix_socket_directories is empty"); - } - - let unix_socket_directory = unix_socket_directories.split(',').next().unwrap(); - - let path = url::percent_encoding::utf8_percent_encode( - unix_socket_directory, - url::percent_encoding::USERINFO_ENCODE_SET, - ); - let url = format!("postgres://postgres@{}", path); - let conn = or_panic!(Connection::connect(&url[..], TlsMode::None)); - assert!(conn.finish().is_ok()); -} - -#[test] -fn test_transaction_commit() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - - let trans = or_panic!(conn.transaction()); - or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&1i32])); - trans.set_commit(); - drop(trans); - - let stmt = or_panic!(conn.prepare("SELECT * FROM foo")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_transaction_commit_finish() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - - let trans = or_panic!(conn.transaction()); - or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&1i32])); - trans.set_commit(); - assert!(trans.finish().is_ok()); - - let stmt = or_panic!(conn.prepare("SELECT * FROM foo")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_transaction_commit_method() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE 
TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - - let trans = or_panic!(conn.transaction()); - or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&1i32])); - assert!(trans.commit().is_ok()); - - let stmt = or_panic!(conn.prepare("SELECT * FROM foo")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_transaction_rollback() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - - or_panic!(conn.execute("INSERT INTO foo (id) VALUES ($1)", &[&1i32])); - - let trans = or_panic!(conn.transaction()); - or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&2i32])); - drop(trans); - - let stmt = or_panic!(conn.prepare("SELECT * FROM foo")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_transaction_rollback_finish() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - - or_panic!(conn.execute("INSERT INTO foo (id) VALUES ($1)", &[&1i32])); - - let trans = or_panic!(conn.transaction()); - or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&2i32])); - assert!(trans.finish().is_ok()); - - let stmt = or_panic!(conn.prepare("SELECT * FROM foo")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_nested_transactions() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - - or_panic!(conn.execute("INSERT INTO foo (id) VALUES (1)", &[])); - - { - let trans1 = or_panic!(conn.transaction()); - or_panic!(trans1.execute("INSERT INTO foo (id) VALUES (2)", &[])); - - { - let trans2 = or_panic!(trans1.transaction()); - or_panic!(trans2.execute("INSERT INTO foo (id) VALUES (3)", &[])); - } - - { - let trans2 = or_panic!(trans1.transaction()); - or_panic!(trans2.execute("INSERT INTO foo (id) VALUES (4)", &[])); - - { - let trans3 = or_panic!(trans2.transaction()); - or_panic!(trans3.execute("INSERT INTO foo (id) VALUES (5)", &[])); - } - - { - let sp = or_panic!(trans2.savepoint("custom")); - or_panic!(sp.execute("INSERT INTO foo (id) VALUES (6)", &[])); - assert!(sp.commit().is_ok()); - } - - assert!(trans2.commit().is_ok()); - } - - let stmt = or_panic!(trans1.prepare("SELECT * FROM foo ORDER BY id")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32, 2, 4, 6], - result.iter().map(|row| row.get(0)).collect::>() - ); - } - - let stmt = or_panic!(conn.prepare("SELECT * FROM foo ORDER BY id")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_nested_transactions_finish() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - - or_panic!(conn.execute("INSERT INTO foo (id) VALUES (1)", &[])); - - { - let trans1 = or_panic!(conn.transaction()); - or_panic!(trans1.execute("INSERT INTO foo (id) VALUES (2)", 
&[])); - - { - let trans2 = or_panic!(trans1.transaction()); - or_panic!(trans2.execute("INSERT INTO foo (id) VALUES (3)", &[])); - assert!(trans2.finish().is_ok()); - } - - { - let trans2 = or_panic!(trans1.transaction()); - or_panic!(trans2.execute("INSERT INTO foo (id) VALUES (4)", &[])); - - { - let trans3 = or_panic!(trans2.transaction()); - or_panic!(trans3.execute("INSERT INTO foo (id) VALUES (5)", &[])); - assert!(trans3.finish().is_ok()); - } - - { - let sp = or_panic!(trans2.savepoint("custom")); - or_panic!(sp.execute("INSERT INTO foo (id) VALUES (6)", &[])); - sp.set_commit(); - assert!(sp.finish().is_ok()); - } - - trans2.set_commit(); - assert!(trans2.finish().is_ok()); - } - - // in a block to unborrow trans1 for the finish call - { - let stmt = or_panic!(trans1.prepare("SELECT * FROM foo ORDER BY id")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32, 2, 4, 6], - result.iter().map(|row| row.get(0)).collect::>() - ); - } - - assert!(trans1.finish().is_ok()); - } - - let stmt = or_panic!(conn.prepare("SELECT * FROM foo ORDER BY id")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_nested_transactions_partial_rollback() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - - or_panic!(conn.execute("INSERT INTO foo (id) VALUES ($1)", &[&1i32])); - - { - let trans = or_panic!(conn.transaction()); - or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&2i32])); - { - let trans = or_panic!(trans.transaction()); - or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&3i32])); - { - let trans = or_panic!(trans.transaction()); - or_panic!(trans.execute("INSERT INTO foo (id) VALUES ($1)", &[&4i32])); - drop(trans); - } - drop(trans); - } - or_panic!(trans.commit()); - } - - let stmt = or_panic!(conn.prepare("SELECT * FROM foo ORDER BY id")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i32, 2], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -#[should_panic(expected = "active transaction")] -fn test_conn_trans_when_nested() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let _trans = or_panic!(conn.transaction()); - conn.transaction().unwrap(); -} - -#[test] -#[should_panic(expected = "active transaction")] -fn test_trans_with_nested_trans() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let trans = or_panic!(conn.transaction()); - let _trans2 = or_panic!(trans.transaction()); - trans.transaction().unwrap(); -} - -#[test] -#[should_panic(expected = "active transaction")] -fn test_trans_with_savepoints() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let trans = or_panic!(conn.transaction()); - let _sp = or_panic!(trans.savepoint("custom")); - trans.savepoint("custom2").unwrap(); -} - -#[test] -fn test_stmt_execute_after_transaction() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let trans = or_panic!(conn.transaction()); - let stmt = or_panic!(trans.prepare("SELECT 1")); - or_panic!(trans.finish()); - let result = or_panic!(stmt.query(&[])); - assert_eq!(1i32, result.iter().next().unwrap().get::<_, i32>(0)); 
-} - -#[test] -fn test_stmt_finish() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id BIGINT PRIMARY KEY)", &[])); - let stmt = or_panic!(conn.prepare("SELECT * FROM foo")); - assert!(stmt.finish().is_ok()); -} - -#[test] -#[allow(deprecated)] -fn test_batch_execute() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let query = "CREATE TEMPORARY TABLE foo (id BIGINT PRIMARY KEY); - INSERT INTO foo (id) VALUES (10);"; - or_panic!(conn.batch_execute(query)); - - let stmt = or_panic!(conn.prepare("SELECT * from foo ORDER BY id")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![10i64], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -#[allow(deprecated)] -fn test_batch_execute_error() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let query = "CREATE TEMPORARY TABLE foo (id BIGINT PRIMARY KEY); - INSERT INTO foo (id) VALUES (10); - asdfa; - INSERT INTO foo (id) VALUES (11)"; - conn.batch_execute(query).err().unwrap(); - - let stmt = conn.prepare("SELECT * FROM foo ORDER BY id"); - match stmt { - Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => {} - Err(e) => panic!("unexpected error {:?}", e), - _ => panic!("unexpected success"), - } -} - -#[test] -#[allow(deprecated)] -fn test_transaction_batch_execute() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let trans = or_panic!(conn.transaction()); - let query = "CREATE TEMPORARY TABLE foo (id BIGINT PRIMARY KEY); - INSERT INTO foo (id) VALUES (10);"; - or_panic!(trans.batch_execute(query)); - - let stmt = or_panic!(trans.prepare("SELECT * from foo ORDER BY id")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![10i64], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_query() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id BIGINT PRIMARY KEY)", &[])); - or_panic!(conn.execute("INSERT INTO foo (id) VALUES ($1), ($2)", &[&1i64, &2i64])); - let stmt = or_panic!(conn.prepare("SELECT * from foo ORDER BY id")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![1i64, 2], - result.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_error_after_datarow() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare( - " -SELECT - (SELECT generate_series(1, ss.i)) -FROM (SELECT gs.i - FROM generate_series(1, 2) gs(i) - ORDER BY gs.i - LIMIT 2) ss", - )); - match stmt.query(&[]) { - Err(ref e) if e.code() == Some(&SqlState::CARDINALITY_VIOLATION) => {} - Err(err) => panic!("Unexpected error {:?}", err), - Ok(_) => panic!("Expected failure"), - }; -} - -#[test] -fn test_lazy_query() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - - let trans = or_panic!(conn.transaction()); - or_panic!(trans.execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)", &[])); - let stmt = or_panic!(trans.prepare("INSERT INTO foo (id) VALUES ($1)")); - let values = vec![0i32, 1, 2, 3, 4, 5]; - for value in &values { - or_panic!(stmt.execute(&[value])); - } - let stmt = 
or_panic!(trans.prepare("SELECT id FROM foo ORDER BY id")); - let result = or_panic!(stmt.lazy_query(&trans, &[], 2)); - assert_eq!( - values, - result.map(|row| row.get(0)).collect::>().unwrap() - ); -} - -#[test] -#[should_panic(expected = "same `Connection` as")] -fn test_lazy_query_wrong_conn() { - let conn1 = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let conn2 = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - - let trans = or_panic!(conn1.transaction()); - let stmt = or_panic!(conn2.prepare("SELECT 1::INT")); - stmt.lazy_query(&trans, &[], 1).unwrap(); -} - -#[test] -fn test_param_types() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SELECT $1::INT, $2::VARCHAR")); - assert_eq!(stmt.param_types(), &[Type::INT4, Type::VARCHAR][..]); -} - -#[test] -fn test_columns() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SELECT 1::INT as a, 'hi'::VARCHAR as b")); - let cols = stmt.columns(); - assert_eq!(2, cols.len()); - assert_eq!(cols[0].name(), "a"); - assert_eq!(cols[0].type_(), &Type::INT4); - assert_eq!(cols[1].name(), "b"); - assert_eq!(cols[1].type_(), &Type::VARCHAR); -} - -#[test] -fn test_execute_counts() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - assert_eq!( - 0, - or_panic!(conn.execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY, - b INT - )", - &[], - )) - ); - assert_eq!( - 3, - or_panic!(conn.execute( - "INSERT INTO foo (b) VALUES ($1), ($2), ($2)", - &[&1i32, &2i32], - )) - ); - assert_eq!( - 2, - or_panic!(conn.execute("UPDATE foo SET b = 0 WHERE b = 2", &[])) - ); - assert_eq!(3, or_panic!(conn.execute("SELECT * FROM foo", &[]))); -} - -#[test] -fn test_wrong_param_type() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let err = conn.execute("SELECT $1::VARCHAR", &[&1i32]).unwrap_err(); - match err.as_conversion() { - Some(e) if e.is::() => {} - _ => panic!("unexpected result {:?}", err), - } -} - -#[test] -#[should_panic(expected = "expected 2 parameters but got 1")] -fn test_too_few_params() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let _ = conn.execute("SELECT $1::INT, $2::INT", &[&1i32]); -} - -#[test] -#[should_panic(expected = "expected 2 parameters but got 3")] -fn test_too_many_params() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let _ = conn.execute("SELECT $1::INT, $2::INT", &[&1i32, &2i32, &3i32]); -} - -#[test] -fn test_index_named() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SELECT 10::INT as val")); - let result = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![10i32], - result - .iter() - .map(|row| row.get("val")) - .collect::>() - ); -} - -#[test] -#[should_panic] -fn test_index_named_fail() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SELECT 10::INT as id")); - let result = or_panic!(stmt.query(&[])); - - let _: i32 = result.iter().next().unwrap().get("asdf"); -} - -#[test] -fn 
test_get_named_err() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SELECT 10::INT as id")); - let result = or_panic!(stmt.query(&[])); - - match result.iter().next().unwrap().get_opt::<_, i32>("asdf") { - None => {} - res => panic!("unexpected result {:?}", res), - }; -} - -#[test] -fn test_get_was_null() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SELECT NULL::INT as id")); - let result = or_panic!(stmt.query(&[])); - - match result.iter().next().unwrap().get_opt::<_, i32>(0) { - Some(Err(ref e)) if e.as_conversion().is_some() => {} - res => panic!("unexpected result {:?}", res), - }; -} - -#[test] -fn test_get_off_by_one() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SELECT 10::INT as id")); - let result = or_panic!(stmt.query(&[])); - - match result.iter().next().unwrap().get_opt::<_, i32>(1) { - None => {} - res => panic!("unexpected result {:?}", res), - }; -} - -#[test] -fn test_custom_notice_handler() { - static mut COUNT: usize = 0; - struct Handler; - - impl HandleNotice for Handler { - fn handle_notice(&mut self, notice: DbError) { - assert_eq!("note", notice.message); - unsafe { - COUNT += 1; - } - } - } - - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433?client_min_messages=NOTICE", - TlsMode::None, - )); - conn.set_notice_handler(Box::new(Handler)); - or_panic!(conn.execute( - "CREATE FUNCTION pg_temp.note() RETURNS INT AS $$ - BEGIN - RAISE NOTICE 'note'; - RETURN 1; - END; $$ LANGUAGE plpgsql", - &[], - )); - or_panic!(conn.execute("SELECT pg_temp.note()", &[])); - - assert_eq!(unsafe { COUNT }, 1); -} - -#[test] -fn test_notification_iterator_none() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - assert!(conn.notifications().iter().next().unwrap().is_none()); -} - -fn check_notification(expected: Notification, actual: Notification) { - assert_eq!(&expected.channel, &actual.channel); - assert_eq!(&expected.payload, &actual.payload); -} - -#[test] -fn test_notification_iterator_some() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let notifications = conn.notifications(); - let mut it = notifications.iter(); - or_panic!(conn.execute("LISTEN test_notification_iterator_one_channel", &[])); - or_panic!(conn.execute("LISTEN test_notification_iterator_one_channel2", &[])); - or_panic!(conn.execute( - "NOTIFY test_notification_iterator_one_channel, 'hello'", - &[], - )); - or_panic!(conn.execute( - "NOTIFY test_notification_iterator_one_channel2, 'world'", - &[], - )); - - check_notification( - Notification { - process_id: 0, - channel: "test_notification_iterator_one_channel".to_string(), - payload: "hello".to_string(), - }, - it.next().unwrap().unwrap(), - ); - check_notification( - Notification { - process_id: 0, - channel: "test_notification_iterator_one_channel2".to_string(), - payload: "world".to_string(), - }, - it.next().unwrap().unwrap(), - ); - assert!(it.next().unwrap().is_none()); - - or_panic!(conn.execute("NOTIFY test_notification_iterator_one_channel, '!'", &[])); - check_notification( - Notification { - process_id: 0, - channel: "test_notification_iterator_one_channel".to_string(), - payload: "!".to_string(), - 
}, - it.next().unwrap().unwrap(), - ); - assert!(it.next().unwrap().is_none()); -} - -#[test] -fn test_notifications_next_block() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("LISTEN test_notifications_next_block", &[])); - - let _t = thread::spawn(|| { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - thread::sleep(Duration::from_millis(500)); - or_panic!(conn.execute("NOTIFY test_notifications_next_block, 'foo'", &[])); - }); - - let notifications = conn.notifications(); - check_notification( - Notification { - process_id: 0, - channel: "test_notifications_next_block".to_string(), - payload: "foo".to_string(), - }, - notifications.blocking_iter().next().unwrap().unwrap(), - ); -} - -#[test] -fn test_notification_next_timeout() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("LISTEN test_notifications_next_timeout", &[])); - - let _t = thread::spawn(|| { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - thread::sleep(Duration::from_millis(500)); - or_panic!(conn.execute("NOTIFY test_notifications_next_timeout, 'foo'", &[])); - thread::sleep(Duration::from_millis(1500)); - or_panic!(conn.execute("NOTIFY test_notifications_next_timeout, 'foo'", &[])); - }); - - let notifications = conn.notifications(); - let mut it = notifications.timeout_iter(Duration::from_secs(1)); - check_notification( - Notification { - process_id: 0, - channel: "test_notifications_next_timeout".to_string(), - payload: "foo".to_string(), - }, - it.next().unwrap().unwrap(), - ); - - assert!(it.next().unwrap().is_none()); -} - -#[test] -fn test_notification_disconnect() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("LISTEN test_notifications_disconnect", &[])); - - let _t = thread::spawn(|| { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - thread::sleep(Duration::from_millis(500)); - or_panic!(conn.execute( - "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE query = 'LISTEN test_notifications_disconnect'", - &[], - )); - }); - - let notifications = conn.notifications(); - assert!(notifications.blocking_iter().next().is_err()); -} - -#[test] -// This test is pretty sad, but I don't think there's a better way :( -fn test_cancel_query() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let cancel_data = conn.cancel_data(); - - let t = thread::spawn(move || { - thread::sleep(Duration::from_millis(500)); - assert!( - postgres::cancel_query( - "postgres://postgres@localhost:5433", - TlsMode::None, - &cancel_data, - ).is_ok() - ); - }); - - match conn.execute("SELECT pg_sleep(10)", &[]) { - Err(ref e) if e.code() == Some(&SqlState::QUERY_CANCELED) => {} - Err(res) => panic!("Unexpected result {:?}", res), - _ => panic!("Unexpected result"), - } - - t.join().unwrap(); -} - -#[test] -fn test_plaintext_pass() { - or_panic!(Connection::connect( - "postgres://pass_user:password@localhost:5433/postgres", - TlsMode::None, - )); -} - -#[test] -fn test_plaintext_pass_no_pass() { - let ret = Connection::connect( - "postgres://pass_user@localhost:5433/postgres", - TlsMode::None, - ); - match ret { - Err(ref e) if 
e.as_connection().is_some() => (), - Err(err) => panic!("Unexpected error {:?}", err), - _ => panic!("Expected error"), - } -} - -#[test] -fn test_plaintext_pass_wrong_pass() { - let ret = Connection::connect( - "postgres://pass_user:asdf@localhost:5433/postgres", - TlsMode::None, - ); - match ret { - Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} - Err(err) => panic!("Unexpected error {:?}", err), - _ => panic!("Expected error"), - } -} - -#[test] -fn test_md5_pass() { - or_panic!(Connection::connect( - "postgres://md5_user:password@localhost:5433/postgres", - TlsMode::None, - )); -} - -#[test] -fn test_md5_pass_no_pass() { - let ret = Connection::connect("postgres://md5_user@localhost:5433/postgres", TlsMode::None); - match ret { - Err(ref e) if e.as_connection().is_some() => (), - Err(err) => panic!("Unexpected error {:?}", err), - _ => panic!("Expected error"), - } -} - -#[test] -fn test_md5_pass_wrong_pass() { - let ret = Connection::connect( - "postgres://md5_user:asdf@localhost:5433/postgres", - TlsMode::None, - ); - match ret { - Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} - Err(err) => panic!("Unexpected error {:?}", err), - _ => panic!("Expected error"), - } -} - -#[test] -fn test_scram_pass() { - or_panic!(Connection::connect( - "postgres://scram_user:password@localhost:5433/postgres", - TlsMode::None, - )); -} - -#[test] -fn test_scram_pass_no_pass() { - let ret = Connection::connect( - "postgres://scram_user@localhost:5433/postgres", - TlsMode::None, - ); - match ret { - Err(ref e) if e.as_connection().is_some() => (), - Err(err) => panic!("Unexpected error {:?}", err), - _ => panic!("Expected error"), - } -} - -#[test] -fn test_scram_pass_wrong_pass() { - let ret = Connection::connect( - "postgres://scram_user:asdf@localhost:5433/postgres", - TlsMode::None, - ); - match ret { - Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} - Err(err) => panic!("Unexpected error {:?}", err), - _ => panic!("Expected error"), - } -} - -#[test] -fn test_execute_copy_from_err() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT)", &[])); - let stmt = or_panic!(conn.prepare("COPY foo (id) FROM STDIN")); - - let err = stmt.execute(&[]).unwrap_err(); - match err.as_db() { - Some(err) if err.message.contains("COPY") => {} - _ => panic!("Unexpected error {:?}", err), - } - - let err = stmt.execute(&[]).unwrap_err(); - match err.as_db() { - Some(err) if err.message.contains("COPY") => {} - _ => panic!("Unexpected error {:?}", err), - } -} - -#[test] -#[allow(deprecated)] -fn test_batch_execute_copy_from_err() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT)", &[])); - let err = conn.batch_execute("COPY foo (id) FROM STDIN").unwrap_err(); - match err.as_db() { - Some(err) if err.message.contains("COPY") => {} - _ => panic!("Unexpected error {:?}", err), - } -} - -#[test] -fn test_copy_io_error() { - struct ErrorReader; - - impl io::Read for ErrorReader { - fn read(&mut self, _: &mut [u8]) -> io::Result { - Err(io::Error::new(io::ErrorKind::AddrNotAvailable, "boom")) - } - } - - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT)", &[])); - let stmt = or_panic!(conn.prepare("COPY foo (id) FROM 
STDIN")); - let err = stmt.copy_in(&[], &mut ErrorReader).unwrap_err(); - match err.as_io() { - Some(e) if e.kind() == io::ErrorKind::AddrNotAvailable => {} - _ => panic!("Unexpected error {:?}", err), - } - - or_panic!(conn.execute("SELECT 1", &[])); -} - -#[test] -fn test_copy() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT)", &[])); - let stmt = or_panic!(conn.prepare("COPY foo (id) FROM STDIN")); - let mut data = &b"1\n2\n3\n5\n8\n"[..]; - assert_eq!(5, or_panic!(stmt.copy_in(&[], &mut data))); - let stmt = or_panic!(conn.prepare("SELECT id FROM foo ORDER BY id")); - assert_eq!( - vec![1i32, 2, 3, 5, 8], - stmt.query(&[]) - .unwrap() - .iter() - .map(|r| r.get(0)) - .collect::>() - ); -} - -#[test] -fn test_query_copy_out_err() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.simple_query( - " - CREATE TEMPORARY TABLE foo (id INT); - INSERT INTO foo (id) VALUES (0), (1), (2), (3)", - )); - let stmt = or_panic!(conn.prepare("COPY foo (id) TO STDOUT")); - let err = stmt.query(&[]).unwrap_err(); - match err.as_io() { - Some(e) if e.to_string().contains("COPY") => {} - _ => panic!("unexpected error {:?}", err), - }; -} - -#[test] -fn test_copy_out() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.simple_query( - " - CREATE TEMPORARY TABLE foo (id INT); - INSERT INTO foo (id) VALUES (0), (1), (2), (3)", - )); - let stmt = or_panic!(conn.prepare("COPY (SELECT id FROM foo ORDER BY id) TO STDOUT")); - let mut buf = vec![]; - let count = or_panic!(stmt.copy_out(&[], &mut buf)); - assert_eq!(count, 4); - assert_eq!(buf, b"0\n1\n2\n3\n"); - or_panic!(conn.simple_query("SELECT 1")); -} - -#[test] -fn test_copy_out_error() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.simple_query( - " - CREATE TEMPORARY TABLE foo (id INT); - INSERT INTO foo (id) VALUES (0), (1), (2), (3)", - )); - let stmt = or_panic!(conn.prepare("COPY (SELECT id FROM foo ORDER BY id) TO STDOUT (OIDS)")); - let mut buf = vec![]; - let err = stmt.copy_out(&[], &mut buf).unwrap_err(); - match err.as_db() { - Some(_) => {} - _ => panic!("unexpected error {}", err), - } -} - -#[test] -// Just make sure the impls don't infinite loop -fn test_generic_connection() { - fn f(t: &T) - where - T: GenericConnection, - { - or_panic!(t.execute("SELECT 1", &[])); - } - - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - f(&conn); - let trans = or_panic!(conn.transaction()); - f(&trans); -} - -#[test] -fn test_custom_range_element_type() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute( - "CREATE TYPE pg_temp.floatrange AS RANGE ( - subtype = float8, - subtype_diff = float8mi - )", - &[], - )); - let stmt = or_panic!(conn.prepare("SELECT $1::floatrange")); - let ty = &stmt.param_types()[0]; - assert_eq!("floatrange", ty.name()); - assert_eq!(&Kind::Range(Type::FLOAT8), ty.kind()); -} - -#[test] -fn test_prepare_cached() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute("CREATE TEMPORARY TABLE foo (id INT)", &[])); - or_panic!(conn.execute("INSERT INTO foo (id) VALUES (1), 
(2)", &[])); - - let stmt = or_panic!(conn.prepare_cached("SELECT id FROM foo ORDER BY id")); - assert_eq!( - vec![1, 2], - or_panic!(stmt.query(&[])) - .iter() - .map(|r| r.get(0)) - .collect::>() - ); - or_panic!(stmt.finish()); - - let stmt = or_panic!(conn.prepare_cached("SELECT id FROM foo ORDER BY id")); - assert_eq!( - vec![1, 2], - or_panic!(stmt.query(&[])) - .iter() - .map(|r| r.get(0)) - .collect::>() - ); - or_panic!(stmt.finish()); - - let stmt = or_panic!(conn.prepare_cached("SELECT id FROM foo ORDER BY id DESC")); - assert_eq!( - vec![2, 1], - or_panic!(stmt.query(&[])) - .iter() - .map(|r| r.get(0)) - .collect::>() - ); - or_panic!(stmt.finish()); -} - -#[test] -fn test_is_active() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - assert!(conn.is_active()); - let trans = or_panic!(conn.transaction()); - assert!(!conn.is_active()); - assert!(trans.is_active()); - { - let trans2 = or_panic!(trans.transaction()); - assert!(!conn.is_active()); - assert!(!trans.is_active()); - assert!(trans2.is_active()); - or_panic!(trans2.finish()); - } - assert!(!conn.is_active()); - assert!(trans.is_active()); - or_panic!(trans.finish()); - assert!(conn.is_active()); -} - -#[test] -fn test_parameter() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - assert_eq!(Some("UTF8".to_string()), conn.parameter("client_encoding")); - assert_eq!(None, conn.parameter("asdf")); -} - -#[test] -fn url_unencoded_password() { - assert!( - "postgresql://username:password%1*@localhost:5433" - .into_connect_params() - .is_err() - ) -} - -#[test] -fn url_encoded_password() { - let params = "postgresql://username%7b%7c:password%7b%7c@localhost:5433" - .into_connect_params() - .unwrap(); - assert_eq!("username{|", params.user().unwrap().name()); - assert_eq!("password{|", params.user().unwrap().password().unwrap()); -} - -#[test] -fn test_transaction_isolation_level() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - assert_eq!( - IsolationLevel::ReadCommitted, - or_panic!(conn.transaction_isolation()) - ); - or_panic!(conn.set_transaction_config( - transaction::Config::new().isolation_level(IsolationLevel::ReadUncommitted), - )); - assert_eq!( - IsolationLevel::ReadUncommitted, - or_panic!(conn.transaction_isolation()) - ); - or_panic!(conn.set_transaction_config( - transaction::Config::new().isolation_level(IsolationLevel::RepeatableRead), - )); - assert_eq!( - IsolationLevel::RepeatableRead, - or_panic!(conn.transaction_isolation()) - ); - or_panic!(conn.set_transaction_config( - transaction::Config::new().isolation_level(IsolationLevel::Serializable), - )); - assert_eq!( - IsolationLevel::Serializable, - or_panic!(conn.transaction_isolation()) - ); - or_panic!(conn.set_transaction_config( - transaction::Config::new().isolation_level(IsolationLevel::ReadCommitted), - )); - assert_eq!( - IsolationLevel::ReadCommitted, - or_panic!(conn.transaction_isolation()) - ); -} - -#[test] -fn test_rows_index() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.simple_query( - " - CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY); - INSERT INTO foo (id) VALUES (1), (2), (3); - ", - ).unwrap(); - let stmt = conn.prepare("SELECT id FROM foo ORDER BY id").unwrap(); - let rows = stmt.query(&[]).unwrap(); - assert_eq!(3, rows.len()); - assert_eq!(2i32, rows.get(1).get::<_, i32>(0)); -} - -#[test] 
-fn test_type_names() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - let stmt = conn - .prepare( - "SELECT t.oid, t.typname - FROM pg_catalog.pg_type t, pg_namespace n - WHERE n.oid = t.typnamespace - AND n.nspname = 'pg_catalog' - AND t.oid < 10000 - AND t.typtype != 'c'", - ).unwrap(); - for row in &stmt.query(&[]).unwrap() { - let id: Oid = row.get(0); - let name: String = row.get(1); - assert_eq!(Type::from_oid(id).unwrap().name(), name); - } -} - -#[test] -fn test_conn_query() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.simple_query( - " - CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY); - INSERT INTO foo (id) VALUES (1), (2), (3); - ", - ).unwrap(); - let ids = conn - .query("SELECT id FROM foo ORDER BY id", &[]) - .unwrap() - .iter() - .map(|r| r.get(0)) - .collect::>(); - assert_eq!(ids, [1, 2, 3]); -} - -#[test] -fn transaction_config() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - let mut config = transaction::Config::new(); - config - .isolation_level(IsolationLevel::Serializable) - .read_only(true) - .deferrable(true); - conn.set_transaction_config(&config).unwrap(); -} - -#[test] -fn transaction_config_one_setting() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.set_transaction_config(transaction::Config::new().read_only(true)) - .unwrap(); - conn.set_transaction_config(transaction::Config::new().deferrable(true)) - .unwrap(); -} - -#[test] -fn transaction_with() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - let mut config = transaction::Config::new(); - config - .isolation_level(IsolationLevel::Serializable) - .read_only(true) - .deferrable(true); - conn.transaction_with(&config).unwrap().finish().unwrap(); -} - -#[test] -fn transaction_set_config() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - let trans = conn.transaction().unwrap(); - let mut config = transaction::Config::new(); - config - .isolation_level(IsolationLevel::Serializable) - .read_only(true) - .deferrable(true); - trans.set_config(&config).unwrap(); - trans.finish().unwrap(); -} - -#[test] -fn keepalive() { - let params = "postgres://postgres@localhost:5433?keepalive=10" - .into_connect_params() - .unwrap(); - assert_eq!(params.keepalive(), Some(Duration::from_secs(10))); - - Connection::connect(params, TlsMode::None).unwrap(); -} - -#[test] -fn explicit_types() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - let stmt = conn - .prepare_typed("SELECT $1::INT4", &[Some(Type::INT8)]) - .unwrap(); - assert_eq!(stmt.param_types()[0], Type::INT8); -} - -#[test] -fn simple_query() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.simple_query( - " - CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY); - INSERT INTO foo (id) VALUES (1), (2), (3); - ", - ).unwrap(); - let queries = "SELECT id FROM foo WHERE id = 1 ORDER BY id; \ - SELECT id FROM foo WHERE id != 1 ORDER BY id"; - - let results = conn.simple_query(queries).unwrap(); - assert_eq!(results[0].get(0).get("id"), "1"); - assert_eq!(results[1].get(0).get("id"), "2"); - assert_eq!(results[1].get(1).get("id"), "3"); -} diff --git a/postgres/tests/types/bit_vec.rs b/postgres/tests/types/bit_vec.rs deleted file mode 100644 index 
2e0ac53de..000000000 --- a/postgres/tests/types/bit_vec.rs +++ /dev/null @@ -1,30 +0,0 @@ -extern crate bit_vec; - -use self::bit_vec::BitVec; -use types::test_type; - -#[test] -fn test_bit_params() { - let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); - bv.pop(); - bv.pop(); - test_type( - "BIT(14)", - &[(Some(bv), "B'01101001000001'"), (None, "NULL")], - ) -} - -#[test] -fn test_varbit_params() { - let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); - bv.pop(); - bv.pop(); - test_type( - "VARBIT", - &[ - (Some(bv), "B'01101001000001'"), - (Some(BitVec::from_bytes(&[])), "B''"), - (None, "NULL"), - ], - ) -} diff --git a/postgres/tests/types/chrono.rs b/postgres/tests/types/chrono.rs deleted file mode 100644 index 3d5ef64d3..000000000 --- a/postgres/tests/types/chrono.rs +++ /dev/null @@ -1,150 +0,0 @@ -extern crate chrono; - -use self::chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc}; -use types::test_type; - -use postgres::types::{Date, Timestamp}; - -#[test] -fn test_naive_date_time_params() { - fn make_check<'a>(time: &'a str) -> (Option, &'a str) { - ( - Some(NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), - time, - ) - } - test_type( - "TIMESTAMP", - &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_with_special_naive_date_time_params() { - fn make_check<'a>(time: &'a str) -> (Timestamp, &'a str) { - ( - Timestamp::Value( - NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap(), - ), - time, - ) - } - test_type( - "TIMESTAMP", - &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), - (Timestamp::PosInfinity, "'infinity'"), - (Timestamp::NegInfinity, "'-infinity'"), - ], - ); -} - -#[test] -fn test_date_time_params() { - fn make_check<'a>(time: &'a str) -> (Option>, &'a str) { - ( - Some( - Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") - .unwrap(), - ), - time, - ) - } - test_type( - "TIMESTAMP WITH TIME ZONE", - &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_with_special_date_time_params() { - fn make_check<'a>(time: &'a str) -> (Timestamp>, &'a str) { - ( - Timestamp::Value( - Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") - .unwrap(), - ), - time, - ) - } - test_type( - "TIMESTAMP WITH TIME ZONE", - &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), - (Timestamp::PosInfinity, "'infinity'"), - (Timestamp::NegInfinity, "'-infinity'"), - ], - ); -} - -#[test] -fn test_date_params() { - fn make_check<'a>(time: &'a str) -> (Option, &'a str) { - ( - Some(NaiveDate::parse_from_str(time, "'%Y-%m-%d'").unwrap()), - time, - ) - } - test_type( - "DATE", - &[ - make_check("'1970-01-01'"), - make_check("'1965-09-25'"), - make_check("'2010-02-09'"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_with_special_date_params() { - fn make_check<'a>(date: &'a str) -> (Date, &'a str) { - ( - Date::Value(NaiveDate::parse_from_str(date, "'%Y-%m-%d'").unwrap()), - date, - ) - } - test_type( - "DATE", - &[ - make_check("'1970-01-01'"), - make_check("'1965-09-25'"), - make_check("'2010-02-09'"), - 
(Date::PosInfinity, "'infinity'"), - (Date::NegInfinity, "'-infinity'"), - ], - ); -} - -#[test] -fn test_time_params() { - fn make_check<'a>(time: &'a str) -> (Option, &'a str) { - ( - Some(NaiveTime::parse_from_str(time, "'%H:%M:%S.%f'").unwrap()), - time, - ) - } - test_type( - "TIME", - &[ - make_check("'00:00:00.010000000'"), - make_check("'11:19:33.100314000'"), - make_check("'23:11:45.120200000'"), - (None, "NULL"), - ], - ); -} diff --git a/postgres/tests/types/eui48.rs b/postgres/tests/types/eui48.rs deleted file mode 100644 index dc77078e9..000000000 --- a/postgres/tests/types/eui48.rs +++ /dev/null @@ -1,17 +0,0 @@ -extern crate eui48; - -use types::test_type; - -#[test] -fn test_eui48_params() { - test_type( - "MACADDR", - &[ - ( - Some(eui48::MacAddress::parse_str("12-34-56-AB-CD-EF").unwrap()), - "'12-34-56-ab-cd-ef'", - ), - (None, "NULL"), - ], - ) -} diff --git a/postgres/tests/types/geo.rs b/postgres/tests/types/geo.rs deleted file mode 100644 index 43a95debe..000000000 --- a/postgres/tests/types/geo.rs +++ /dev/null @@ -1,58 +0,0 @@ -extern crate geo; - -use self::geo::{Coordinate, LineString, Point, Rect}; -use types::test_type; - -#[test] -fn test_point_params() { - test_type( - "POINT", - &[ - (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), - (Some(Point::new(-3.14, 1.618)), "POINT(-3.14, 1.618)"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_box_params() { - test_type( - "BOX", - &[ - ( - Some(Rect { - min: Coordinate { x: -3.14, y: 1.618 }, - max: Coordinate { - x: 160.0, - y: 69701.5615, - }, - }), - "BOX(POINT(160.0, 69701.5615), POINT(-3.14, 1.618))", - ), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_path_params() { - let points = vec![ - Coordinate { x: 0., y: 0. }, - Coordinate { x: -3.14, y: 1.618 }, - Coordinate { - x: 160.0, - y: 69701.5615, - }, - ]; - test_type( - "PATH", - &[ - ( - Some(LineString(points)), - "path '((0, 0), (-3.14, 1.618), (160.0, 69701.5615))'", - ), - (None, "NULL"), - ], - ); -} diff --git a/postgres/tests/types/mod.rs b/postgres/tests/types/mod.rs deleted file mode 100644 index 0a0be5384..000000000 --- a/postgres/tests/types/mod.rs +++ /dev/null @@ -1,530 +0,0 @@ -use std::collections::HashMap; -use std::error; -use std::f32; -use std::f64; -use std::fmt; -use std::result; -use std::time::{Duration, UNIX_EPOCH}; - -use postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, ToSql, Type, WrongType}; -use postgres::{Connection, TlsMode}; - -#[cfg(feature = "with-bit-vec-0.5")] -mod bit_vec; -#[cfg(feature = "with-chrono-0.4")] -mod chrono; -#[cfg(feature = "with-eui48-0.3")] -mod eui48; -#[cfg(feature = "with-geo-0.10")] -mod geo; -#[cfg(feature = "with-serde_json-1")] -mod serde_json; -#[cfg(feature = "with-uuid-0.6")] -mod uuid; - -fn test_type(sql_type: &str, checks: &[(T, S)]) -where - T: PartialEq + for<'a> FromSqlOwned + ToSql, - S: fmt::Display, -{ - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - for &(ref val, ref repr) in checks.iter() { - let stmt = or_panic!(conn.prepare(&*format!("SELECT {}::{}", *repr, sql_type))); - let rows = or_panic!(stmt.query(&[])); - let row = rows.iter().next().unwrap(); - let result = row.get(0); - assert_eq!(val, &result); - - let stmt = or_panic!(conn.prepare(&*format!("SELECT $1::{}", sql_type))); - let rows = or_panic!(stmt.query(&[val])); - let row = rows.iter().next().unwrap(); - let result = row.get(0); - assert_eq!(val, &result); - } -} - -#[test] -fn test_ref_tosql() { - let conn = or_panic!(Connection::connect( - 
"postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = conn.prepare("SELECT $1::Int").unwrap(); - let num: &ToSql = &&7; - stmt.query(&[num]).unwrap(); -} - -#[test] -fn test_bool_params() { - test_type( - "BOOL", - &[(Some(true), "'t'"), (Some(false), "'f'"), (None, "NULL")], - ); -} - -#[test] -fn test_i8_params() { - test_type("\"char\"", &[(Some('a' as i8), "'a'"), (None, "NULL")]); -} - -#[test] -fn test_name_params() { - test_type( - "NAME", - &[ - (Some("hello world".to_owned()), "'hello world'"), - ( - Some("イロハニホヘト チリヌルヲ".to_owned()), - "'イロハニホヘト チリヌルヲ'", - ), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_i16_params() { - test_type( - "SMALLINT", - &[ - (Some(15001i16), "15001"), - (Some(-15001i16), "-15001"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_i32_params() { - test_type( - "INT", - &[ - (Some(2147483548i32), "2147483548"), - (Some(-2147483548i32), "-2147483548"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_oid_params() { - test_type( - "OID", - &[ - (Some(2147483548u32), "2147483548"), - (Some(4000000000), "4000000000"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_i64_params() { - test_type( - "BIGINT", - &[ - (Some(9223372036854775708i64), "9223372036854775708"), - (Some(-9223372036854775708i64), "-9223372036854775708"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_f32_params() { - test_type( - "REAL", - &[ - (Some(f32::INFINITY), "'infinity'"), - (Some(f32::NEG_INFINITY), "'-infinity'"), - (Some(1000.55), "1000.55"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_f64_params() { - test_type( - "DOUBLE PRECISION", - &[ - (Some(f64::INFINITY), "'infinity'"), - (Some(f64::NEG_INFINITY), "'-infinity'"), - (Some(10000.55), "10000.55"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_varchar_params() { - test_type( - "VARCHAR", - &[ - (Some("hello world".to_owned()), "'hello world'"), - ( - Some("イロハニホヘト チリヌルヲ".to_owned()), - "'イロハニホヘト チリヌルヲ'", - ), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_text_params() { - test_type( - "TEXT", - &[ - (Some("hello world".to_owned()), "'hello world'"), - ( - Some("イロハニホヘト チリヌルヲ".to_owned()), - "'イロハニホヘト チリヌルヲ'", - ), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_borrowed_text() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - - let rows = or_panic!(conn.query("SELECT 'foo'", &[])); - let row = rows.get(0); - let s: &str = row.get(0); - assert_eq!(s, "foo"); -} - -#[test] -fn test_bpchar_params() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY, - b CHAR(5) - )", - &[], - )); - or_panic!(conn.execute( - "INSERT INTO foo (b) VALUES ($1), ($2), ($3)", - &[&Some("12345"), &Some("123"), &None::<&'static str>], - )); - let stmt = or_panic!(conn.prepare("SELECT b FROM foo ORDER BY id")); - let res = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![Some("12345".to_owned()), Some("123 ".to_owned()), None], - res.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_citext_params() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - or_panic!(conn.execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY, - b CITEXT - )", - &[], - )); - or_panic!(conn.execute( - "INSERT INTO foo (b) VALUES ($1), ($2), ($3)", - &[&Some("foobar"), &Some("FooBar"), &None::<&'static str>], - )); - let stmt = 
or_panic!(conn.prepare("SELECT id FROM foo WHERE b = 'FOOBAR' ORDER BY id",)); - let res = or_panic!(stmt.query(&[])); - - assert_eq!( - vec![Some(1i32), Some(2i32)], - res.iter().map(|row| row.get(0)).collect::>() - ); -} - -#[test] -fn test_bytea_params() { - test_type( - "BYTEA", - &[ - (Some(vec![0u8, 1, 2, 3, 254, 255]), "'\\x00010203feff'"), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_borrowed_bytea() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - - let rows = or_panic!(conn.query("SELECT 'foo'::BYTEA", &[])); - let row = rows.get(0); - let s: &[u8] = row.get(0); - assert_eq!(s, b"foo"); -} - -#[test] -fn test_hstore_params() { - macro_rules! make_map { - ($($k:expr => $v:expr),+) => ({ - let mut map = HashMap::new(); - $(map.insert($k, $v);)+ - map - }) - } - test_type( - "hstore", - &[ - ( - Some(make_map!("a".to_owned() => Some("1".to_owned()))), - "'a=>1'", - ), - ( - Some(make_map!("hello".to_owned() => Some("world!".to_owned()), - "hola".to_owned() => Some("mundo!".to_owned()), - "what".to_owned() => None)), - "'hello=>world!,hola=>mundo!,what=>NULL'", - ), - (None, "NULL"), - ], - ); -} - -#[test] -fn test_array_params() { - test_type( - "integer[]", - &[ - (Some(vec![1i32, 2i32]), "ARRAY[1,2]"), - (Some(vec![1i32]), "ARRAY[1]"), - (Some(vec![]), "ARRAY[]"), - (None, "NULL"), - ], - ); -} - -fn test_nan_param(sql_type: &str) -where - T: PartialEq + ToSql + FromSqlOwned, -{ - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare(&*format!("SELECT 'NaN'::{}", sql_type))); - let result = or_panic!(stmt.query(&[])); - let val: T = result.iter().next().unwrap().get(0); - assert!(val != val); -} - -#[test] -fn test_f32_nan_param() { - test_nan_param::("REAL"); -} - -#[test] -fn test_f64_nan_param() { - test_nan_param::("DOUBLE PRECISION"); -} - -#[test] -fn test_pg_database_datname() { - let conn = or_panic!(Connection::connect( - "postgres://postgres@localhost:5433", - TlsMode::None, - )); - let stmt = or_panic!(conn.prepare("SELECT datname FROM pg_database")); - let result = or_panic!(stmt.query(&[])); - - let next = result.iter().next().unwrap(); - or_panic!(next.get_opt::<_, String>(0).unwrap()); - or_panic!(next.get_opt::<_, String>("datname").unwrap()); -} - -#[test] -fn test_slice() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.simple_query( - "CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY, f VARCHAR); - INSERT INTO foo (f) VALUES ('a'), ('b'), ('c'), ('d');", - ).unwrap(); - - let stmt = conn - .prepare("SELECT f FROM foo WHERE id = ANY($1)") - .unwrap(); - let result = stmt.query(&[&&[1i32, 3, 4][..]]).unwrap(); - assert_eq!( - vec!["a".to_owned(), "c".to_owned(), "d".to_owned()], - result - .iter() - .map(|r| r.get::<_, String>(0)) - .collect::>() - ); -} - -#[test] -fn test_slice_wrong_type() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.simple_query("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") - .unwrap(); - - let stmt = conn - .prepare("SELECT * FROM foo WHERE id = ANY($1)") - .unwrap(); - let err = stmt.query(&[&&["hi"][..]]).unwrap_err(); - match err.as_conversion() { - Some(e) if e.is::() => {} - _ => panic!("Unexpected error {:?}", err), - }; -} - -#[test] -fn test_slice_range() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - - let 
stmt = conn.prepare("SELECT $1::INT8RANGE").unwrap(); - let err = stmt.query(&[&&[1i64][..]]).unwrap_err(); - match err.as_conversion() { - Some(e) if e.is::() => {} - _ => panic!("Unexpected error {:?}", err), - }; -} - -#[test] -fn domain() { - #[derive(Debug, PartialEq)] - struct SessionId(Vec); - - impl ToSql for SessionId { - fn to_sql( - &self, - ty: &Type, - out: &mut Vec, - ) -> result::Result> { - let inner = match *ty.kind() { - Kind::Domain(ref inner) => inner, - _ => unreachable!(), - }; - self.0.to_sql(inner, out) - } - - fn accepts(ty: &Type) -> bool { - ty.name() == "session_id" && match *ty.kind() { - Kind::Domain(_) => true, - _ => false, - } - } - - to_sql_checked!(); - } - - impl<'a> FromSql<'a> for SessionId { - fn from_sql( - ty: &Type, - raw: &[u8], - ) -> result::Result> { - Vec::::from_sql(ty, raw).map(SessionId) - } - - fn accepts(ty: &Type) -> bool { - // This is super weird! - as FromSql>::accepts(ty) - } - } - - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.simple_query( - "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16); - CREATE TABLE pg_temp.foo (id pg_temp.session_id);", - ).unwrap(); - - let id = SessionId(b"0123456789abcdef".to_vec()); - conn.execute("INSERT INTO pg_temp.foo (id) VALUES ($1)", &[&id]) - .unwrap(); - let rows = conn.query("SELECT id FROM pg_temp.foo", &[]).unwrap(); - assert_eq!(id, rows.get(0).get(0)); -} - -#[test] -fn composite() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.simple_query( - "CREATE TYPE pg_temp.inventory_item AS ( - name TEXT, - supplier INTEGER, - price NUMERIC - )", - ).unwrap(); - - let stmt = conn.prepare("SELECT $1::inventory_item").unwrap(); - let type_ = &stmt.param_types()[0]; - assert_eq!(type_.name(), "inventory_item"); - match *type_.kind() { - Kind::Composite(ref fields) => { - assert_eq!(fields[0].name(), "name"); - assert_eq!(fields[0].type_(), &Type::TEXT); - assert_eq!(fields[1].name(), "supplier"); - assert_eq!(fields[1].type_(), &Type::INT4); - assert_eq!(fields[2].name(), "price"); - assert_eq!(fields[2].type_(), &Type::NUMERIC); - } - ref t => panic!("bad type {:?}", t), - } -} - -#[test] -fn enum_() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.simple_query("CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy');") - .unwrap(); - - let stmt = conn.prepare("SELECT $1::mood").unwrap(); - let type_ = &stmt.param_types()[0]; - assert_eq!(type_.name(), "mood"); - match *type_.kind() { - Kind::Enum(ref variants) => { - assert_eq!( - variants, - &["sad".to_owned(), "ok".to_owned(), "happy".to_owned()] - ); - } - _ => panic!("bad type"), - } -} - -#[test] -fn system_time() { - test_type( - "TIMESTAMP", - &[ - ( - Some(UNIX_EPOCH + Duration::from_millis(1_010)), - "'1970-01-01 00:00:01.01'", - ), - ( - Some(UNIX_EPOCH - Duration::from_millis(1_010)), - "'1969-12-31 23:59:58.99'", - ), - ( - Some(UNIX_EPOCH + Duration::from_millis(946684800 * 1000 + 1_010)), - "'2000-01-01 00:00:01.01'", - ), - (None, "NULL"), - ], - ); -} diff --git a/postgres/tests/types/serde_json.rs b/postgres/tests/types/serde_json.rs deleted file mode 100644 index bf62a8cbf..000000000 --- a/postgres/tests/types/serde_json.rs +++ /dev/null @@ -1,40 +0,0 @@ -extern crate serde_json; - -use self::serde_json::Value; -use types::test_type; - -#[test] -fn test_json_params() { - test_type( - "JSON", - &[ - ( - Some(serde_json::from_str::("[10, 11, 
12]").unwrap()), - "'[10, 11, 12]'", - ), - ( - Some(serde_json::from_str::("{\"f\": \"asd\"}").unwrap()), - "'{\"f\": \"asd\"}'", - ), - (None, "NULL"), - ], - ) -} - -#[test] -fn test_jsonb_params() { - test_type( - "JSONB", - &[ - ( - Some(serde_json::from_str::("[10, 11, 12]").unwrap()), - "'[10, 11, 12]'", - ), - ( - Some(serde_json::from_str::("{\"f\": \"asd\"}").unwrap()), - "'{\"f\": \"asd\"}'", - ), - (None, "NULL"), - ], - ) -} diff --git a/postgres/tests/types/uuid.rs b/postgres/tests/types/uuid.rs deleted file mode 100644 index d1b995ad8..000000000 --- a/postgres/tests/types/uuid.rs +++ /dev/null @@ -1,17 +0,0 @@ -extern crate uuid; - -use types::test_type; - -#[test] -fn test_uuid_params() { - test_type( - "UUID", - &[ - ( - Some(uuid::Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap()), - "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", - ), - (None, "NULL"), - ], - ) -} From 6a5f22cd5d481f4cce377559435a11f17dd22dd9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 17:18:08 -0800 Subject: [PATCH 027/819] Migrate postgres-protocol to 2018 edition --- .circleci/config.yml | 2 +- postgres-protocol/Cargo.toml | 1 + postgres-protocol/src/authentication/sasl.rs | 11 ++- postgres-protocol/src/lib.rs | 13 +-- postgres-protocol/src/message/backend.rs | 2 +- postgres-protocol/src/message/frontend.rs | 13 +-- postgres-protocol/src/types.rs | 86 ++++++++++---------- 7 files changed, 59 insertions(+), 69 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 47efb05d2..9ea844988 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ version: 2 jobs: build: docker: - - image: rust:1.30.1 + - image: rust:1.31.0 environment: RUSTFLAGS: -D warnings - image: sfackler/rust-postgres-test:5 diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 72194ac78..2c8157e59 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -2,6 +2,7 @@ name = "postgres-protocol" version = "0.3.2" authors = ["Steven Fackler "] +edition = "2018" description = "Low level Postgres protocol APIs" license = "MIT/Apache-2.0" repository = "https://github.com/sfackler/rust-postgres-protocol" diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index cd46d67d2..8488fa3c8 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -1,6 +1,5 @@ //! SASL-based authentication support. 
-use base64; use generic_array::typenum::U32; use generic_array::GenericArray; use hmac::{Hmac, Mac}; @@ -11,7 +10,6 @@ use std::io; use std::iter; use std::mem; use std::str; -use stringprep; const NONCE_LENGTH: usize = 24; @@ -143,7 +141,8 @@ impl ScramSha256 { v = 0x7e } v as char - }).collect::(); + }) + .collect::(); ScramSha256::new_inner(password, channel_binding, nonce) } @@ -333,7 +332,7 @@ impl<'a> Parser<'a> { fn printable(&mut self) -> io::Result<&'a str> { self.take_while(|c| match c { - '\x21'...'\x2b' | '\x2d'...'\x7e' => true, + '\x21'..='\x2b' | '\x2d'..='\x7e' => true, _ => false, }) } @@ -346,7 +345,7 @@ impl<'a> Parser<'a> { fn base64(&mut self) -> io::Result<&'a str> { self.take_while(|c| match c { - 'a'...'z' | 'A'...'Z' | '0'...'9' | '/' | '+' | '=' => true, + 'a'..='z' | 'A'..='Z' | '0'..='9' | '/' | '+' | '=' => true, _ => false, }) } @@ -359,7 +358,7 @@ impl<'a> Parser<'a> { fn posit_number(&mut self) -> io::Result { let n = self.take_while(|c| match c { - '0'...'9' => true, + '0'..='9' => true, _ => false, })?; n.parse() diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index da06a4c36..aa815c361 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -10,18 +10,7 @@ //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. #![doc(html_root_url = "https://docs.rs/postgres-protocol/0.3")] -#![warn(missing_docs)] -extern crate base64; -extern crate byteorder; -extern crate bytes; -extern crate fallible_iterator; -extern crate generic_array; -extern crate hmac; -extern crate md5; -extern crate memchr; -extern crate rand; -extern crate sha2; -extern crate stringprep; +#![warn(missing_docs, rust_2018_idioms)] use byteorder::{BigEndian, ByteOrder}; use std::io; diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index eacb5da47..c11516d13 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -9,7 +9,7 @@ use std::io::{self, Read}; use std::ops::Range; use std::str; -use Oid; +use crate::Oid; /// An enum representing Postgres backend messages. 
pub enum Message { diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index a0c20a836..edb929ef9 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -6,7 +6,7 @@ use std::error::Error; use std::io; use std::marker; -use {write_nullable, FromUsize, IsNull, Oid}; +use crate::{write_nullable, FromUsize, IsNull, Oid}; pub enum Message<'a> { Bind { @@ -148,13 +148,13 @@ where } pub enum BindError { - Conversion(Box), + Conversion(Box), Serialization(io::Error), } -impl From> for BindError { +impl From> for BindError { #[inline] - fn from(e: Box) -> BindError { + fn from(e: Box) -> BindError { BindError::Conversion(e) } } @@ -179,7 +179,7 @@ pub fn bind( where I: IntoIterator, J: IntoIterator, - F: FnMut(T, &mut Vec) -> Result>, + F: FnMut(T, &mut Vec) -> Result>, K: IntoIterator, { buf.push(b'B'); @@ -225,7 +225,8 @@ pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut Vec) { buf.write_i32::(80877102).unwrap(); buf.write_i32::(process_id).unwrap(); buf.write_i32::(secret_key) - }).unwrap(); + }) + .unwrap(); } #[inline] diff --git a/postgres-protocol/src/types.rs b/postgres-protocol/src/types.rs index 1066ee6a4..b5d65cb33 100644 --- a/postgres-protocol/src/types.rs +++ b/postgres-protocol/src/types.rs @@ -5,7 +5,7 @@ use std::boxed::Box as StdBox; use std::error::Error; use std::str; -use {write_nullable, FromUsize, IsNull, Oid}; +use crate::{write_nullable, FromUsize, IsNull, Oid}; const RANGE_UPPER_UNBOUNDED: u8 = 0b0001_0000; const RANGE_LOWER_UNBOUNDED: u8 = 0b0000_1000; @@ -21,7 +21,7 @@ pub fn bool_to_sql(v: bool, buf: &mut Vec) { /// Deserializes a `BOOL` value. #[inline] -pub fn bool_from_sql(buf: &[u8]) -> Result> { +pub fn bool_from_sql(buf: &[u8]) -> Result> { if buf.len() != 1 { return Err("invalid buffer size".into()); } @@ -49,7 +49,7 @@ pub fn text_to_sql(v: &str, buf: &mut Vec) { /// Deserializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value. #[inline] -pub fn text_from_sql(buf: &[u8]) -> Result<&str, StdBox> { +pub fn text_from_sql(buf: &[u8]) -> Result<&str, StdBox> { Ok(str::from_utf8(buf)?) } @@ -61,7 +61,7 @@ pub fn char_to_sql(v: i8, buf: &mut Vec) { /// Deserializes a `"char"` value. #[inline] -pub fn char_from_sql(mut buf: &[u8]) -> Result> { +pub fn char_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i8()?; if !buf.is_empty() { return Err("invalid buffer size".into()); @@ -77,7 +77,7 @@ pub fn int2_to_sql(v: i16, buf: &mut Vec) { /// Deserializes an `INT2` value. #[inline] -pub fn int2_from_sql(mut buf: &[u8]) -> Result> { +pub fn int2_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i16::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); @@ -93,7 +93,7 @@ pub fn int4_to_sql(v: i32, buf: &mut Vec) { /// Deserializes an `INT4` value. #[inline] -pub fn int4_from_sql(mut buf: &[u8]) -> Result> { +pub fn int4_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i32::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); @@ -109,7 +109,7 @@ pub fn oid_to_sql(v: Oid, buf: &mut Vec) { /// Deserializes an `OID` value. #[inline] -pub fn oid_from_sql(mut buf: &[u8]) -> Result> { +pub fn oid_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_u32::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); @@ -125,7 +125,7 @@ pub fn int8_to_sql(v: i64, buf: &mut Vec) { /// Deserializes an `INT8` value. 
#[inline] -pub fn int8_from_sql(mut buf: &[u8]) -> Result> { +pub fn int8_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i64::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); @@ -141,7 +141,7 @@ pub fn float4_to_sql(v: f32, buf: &mut Vec) { /// Deserializes a `FLOAT4` value. #[inline] -pub fn float4_from_sql(mut buf: &[u8]) -> Result> { +pub fn float4_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_f32::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); @@ -157,7 +157,7 @@ pub fn float8_to_sql(v: f64, buf: &mut Vec) { /// Deserializes a `FLOAT8` value. #[inline] -pub fn float8_from_sql(mut buf: &[u8]) -> Result> { +pub fn float8_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_f64::()?; if !buf.is_empty() { return Err("invalid buffer size".into()); @@ -167,7 +167,7 @@ pub fn float8_from_sql(mut buf: &[u8]) -> Result(values: I, buf: &mut Vec) -> Result<(), StdBox> +pub fn hstore_to_sql<'a, I>(values: I, buf: &mut Vec) -> Result<(), StdBox> where I: IntoIterator)>, { @@ -194,7 +194,7 @@ where Ok(()) } -fn write_pascal_string(s: &str, buf: &mut Vec) -> Result<(), StdBox> { +fn write_pascal_string(s: &str, buf: &mut Vec) -> Result<(), StdBox> { let size = i32::from_usize(s.len())?; buf.write_i32::(size).unwrap(); buf.extend_from_slice(s.as_bytes()); @@ -205,7 +205,7 @@ fn write_pascal_string(s: &str, buf: &mut Vec) -> Result<(), StdBox( mut buf: &'a [u8], -) -> Result, StdBox> { +) -> Result, StdBox> { let count = buf.read_i32::()?; if count < 0 { return Err("invalid entry count".into()); @@ -225,10 +225,10 @@ pub struct HstoreEntries<'a> { impl<'a> FallibleIterator for HstoreEntries<'a> { type Item = (&'a str, Option<&'a str>); - type Error = StdBox; + type Error = StdBox; #[inline] - fn next(&mut self) -> Result)>, StdBox> { + fn next(&mut self) -> Result)>, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { return Err("invalid buffer size".into()); @@ -272,7 +272,7 @@ pub fn varbit_to_sql( len: usize, v: I, buf: &mut Vec, -) -> Result<(), StdBox> +) -> Result<(), StdBox> where I: Iterator, { @@ -288,7 +288,7 @@ where /// Deserializes a `VARBIT` or `BIT` value. #[inline] -pub fn varbit_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn varbit_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { let len = buf.read_i32::()?; if len < 0 { return Err("invalid varbit length".into()); @@ -336,7 +336,7 @@ pub fn timestamp_to_sql(v: i64, buf: &mut Vec) { /// /// The value represents the number of microseconds since midnight, January 1st, 2000. #[inline] -pub fn timestamp_from_sql(mut buf: &[u8]) -> Result> { +pub fn timestamp_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i64::()?; if !buf.is_empty() { return Err("invalid message length".into()); @@ -356,7 +356,7 @@ pub fn date_to_sql(v: i32, buf: &mut Vec) { /// /// The value represents the number of days since January 1st, 2000. #[inline] -pub fn date_from_sql(mut buf: &[u8]) -> Result> { +pub fn date_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i32::()?; if !buf.is_empty() { return Err("invalid message length".into()); @@ -376,7 +376,7 @@ pub fn time_to_sql(v: i64, buf: &mut Vec) { /// /// The value represents the number of microseconds since midnight. 
#[inline] -pub fn time_from_sql(mut buf: &[u8]) -> Result> { +pub fn time_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i64::()?; if !buf.is_empty() { return Err("invalid message length".into()); @@ -392,7 +392,7 @@ pub fn macaddr_to_sql(v: [u8; 6], buf: &mut Vec) { /// Deserializes a `MACADDR` value. #[inline] -pub fn macaddr_from_sql(buf: &[u8]) -> Result<[u8; 6], StdBox> { +pub fn macaddr_from_sql(buf: &[u8]) -> Result<[u8; 6], StdBox> { if buf.len() != 6 { return Err("invalid message length".into()); } @@ -409,7 +409,7 @@ pub fn uuid_to_sql(v: [u8; 16], buf: &mut Vec) { /// Deserializes a `UUID` value. #[inline] -pub fn uuid_from_sql(buf: &[u8]) -> Result<[u8; 16], StdBox> { +pub fn uuid_from_sql(buf: &[u8]) -> Result<[u8; 16], StdBox> { if buf.len() != 16 { return Err("invalid message length".into()); } @@ -426,11 +426,11 @@ pub fn array_to_sql( elements: J, mut serializer: F, buf: &mut Vec, -) -> Result<(), StdBox> +) -> Result<(), StdBox> where I: IntoIterator, J: IntoIterator, - F: FnMut(T, &mut Vec) -> Result>, + F: FnMut(T, &mut Vec) -> Result>, { let dimensions_idx = buf.len(); buf.extend_from_slice(&[0; 4]); @@ -469,7 +469,7 @@ where /// Deserializes an array value. #[inline] -pub fn array_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn array_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { let dimensions = buf.read_i32::()?; if dimensions < 0 { return Err("invalid dimension count".into()); @@ -547,10 +547,10 @@ pub struct ArrayDimensions<'a>(&'a [u8]); impl<'a> FallibleIterator for ArrayDimensions<'a> { type Item = ArrayDimension; - type Error = StdBox; + type Error = StdBox; #[inline] - fn next(&mut self) -> Result, StdBox> { + fn next(&mut self) -> Result, StdBox> { if self.0.is_empty() { return Ok(None); } @@ -589,10 +589,10 @@ pub struct ArrayValues<'a> { impl<'a> FallibleIterator for ArrayValues<'a> { type Item = Option<&'a [u8]>; - type Error = StdBox; + type Error = StdBox; #[inline] - fn next(&mut self) -> Result>, StdBox> { + fn next(&mut self) -> Result>, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { return Err("invalid message length".into()); @@ -634,10 +634,10 @@ pub fn range_to_sql( lower: F, upper: G, buf: &mut Vec, -) -> Result<(), StdBox> +) -> Result<(), StdBox> where - F: FnOnce(&mut Vec) -> Result, StdBox>, - G: FnOnce(&mut Vec) -> Result, StdBox>, + F: FnOnce(&mut Vec) -> Result, StdBox>, + G: FnOnce(&mut Vec) -> Result, StdBox>, { let tag_idx = buf.len(); buf.push(0); @@ -663,9 +663,9 @@ where fn write_bound( bound: F, buf: &mut Vec, -) -> Result, StdBox> +) -> Result, StdBox> where - F: FnOnce(&mut Vec) -> Result, StdBox>, + F: FnOnce(&mut Vec) -> Result, StdBox>, { let base = buf.len(); buf.extend_from_slice(&[0; 4]); @@ -702,7 +702,7 @@ pub enum RangeBound { /// Deserializes a range value. #[inline] -pub fn range_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn range_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { let tag = buf.read_u8()?; if tag == RANGE_EMPTY { @@ -728,7 +728,7 @@ fn read_bound<'a>( tag: u8, unbounded: u8, inclusive: u8, -) -> Result>, StdBox> { +) -> Result>, StdBox> { if tag & unbounded != 0 { Ok(RangeBound::Unbounded) } else { @@ -770,7 +770,7 @@ pub fn point_to_sql(x: f64, y: f64, buf: &mut Vec) { /// Deserializes a point value. 
#[inline] -pub fn point_from_sql(mut buf: &[u8]) -> Result> { +pub fn point_from_sql(mut buf: &[u8]) -> Result> { let x = buf.read_f64::()?; let y = buf.read_f64::()?; if !buf.is_empty() { @@ -811,7 +811,7 @@ pub fn box_to_sql(x1: f64, y1: f64, x2: f64, y2: f64, buf: &mut Vec) { /// Deserializes a box value. #[inline] -pub fn box_from_sql(mut buf: &[u8]) -> Result> { +pub fn box_from_sql(mut buf: &[u8]) -> Result> { let x1 = buf.read_f64::()?; let y1 = buf.read_f64::()?; let x2 = buf.read_f64::()?; @@ -852,7 +852,7 @@ pub fn path_to_sql( closed: bool, points: I, buf: &mut Vec, -) -> Result<(), StdBox> +) -> Result<(), StdBox> where I: IntoIterator, { @@ -875,7 +875,7 @@ where /// Deserializes a Postgres path. #[inline] -pub fn path_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn path_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { let closed = buf.read_u8()? != 0; let points = buf.read_i32::()?; @@ -918,10 +918,10 @@ pub struct PathPoints<'a> { impl<'a> FallibleIterator for PathPoints<'a> { type Item = Point; - type Error = StdBox; + type Error = StdBox; #[inline] - fn next(&mut self) -> Result, StdBox> { + fn next(&mut self) -> Result, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { return Err("invalid message length".into()); @@ -949,7 +949,7 @@ mod test { use std::collections::HashMap; use super::*; - use IsNull; + use crate::IsNull; #[test] fn bool() { From d371e9bd6f14136adcd9474c8e4571bb54bf292e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 17:26:14 -0800 Subject: [PATCH 028/819] Migrate tokio-postgres-native-tls to 2018 edition --- tokio-postgres-native-tls/Cargo.toml | 1 + tokio-postgres-native-tls/src/lib.rs | 13 ++----------- tokio-postgres-native-tls/src/test.rs | 2 +- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml index dc4270e8a..9ecfb2c3b 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/tokio-postgres-native-tls/Cargo.toml @@ -2,6 +2,7 @@ name = "tokio-postgres-native-tls" version = "0.1.0" authors = ["Steven Fackler "] +edition = "2018" [dependencies] futures = "0.1" diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index cebfe5560..549234670 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -1,15 +1,6 @@ -extern crate native_tls; -extern crate tokio_io; -extern crate tokio_postgres; -extern crate tokio_tls; +#![warn(rust_2018_idioms)] -#[macro_use] -extern crate futures; - -#[cfg(test)] -extern crate tokio; - -use futures::{Async, Future, Poll}; +use futures::{try_ready, Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_postgres::{ChannelBinding, TlsConnect}; use tokio_tls::{Connect, TlsStream}; diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index 8d081efbc..15371ad69 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -4,7 +4,7 @@ use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; -use TlsConnector; +use crate::TlsConnector; fn smoke_test(builder: &tokio_postgres::Builder, tls: T) where From e0c37c3681c4e90a0e9d474394abbdc8702ebde3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 17:27:56 -0800 Subject: [PATCH 029/819] Migrate tokio-postgres-openssl to 2018 edition --- tokio-postgres-openssl/Cargo.toml | 1 + 
tokio-postgres-openssl/src/lib.rs | 13 ++----------- tokio-postgres-openssl/src/test.rs | 2 +- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/tokio-postgres-openssl/Cargo.toml b/tokio-postgres-openssl/Cargo.toml index a97beedf5..3903050e6 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/tokio-postgres-openssl/Cargo.toml @@ -2,6 +2,7 @@ name = "tokio-postgres-openssl" version = "0.1.0" authors = ["Steven Fackler "] +edition = "2018" [dependencies] futures = "0.1" diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index 802ee0ad8..58aabba56 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -1,15 +1,6 @@ -extern crate openssl; -extern crate tokio_io; -extern crate tokio_openssl; -extern crate tokio_postgres; +#![warn(rust_2018_idioms)] -#[macro_use] -extern crate futures; - -#[cfg(test)] -extern crate tokio; - -use futures::{Async, Future, Poll}; +use futures::{try_ready, Async, Future, Poll}; use openssl::hash::MessageDigest; use openssl::nid::Nid; use openssl::ssl::{ConnectConfiguration, HandshakeError, SslRef}; diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index 7347a2425..72d1dd785 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -4,7 +4,7 @@ use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; -use TlsConnector; +use crate::TlsConnector; fn smoke_test(builder: &tokio_postgres::Builder, tls: T) where From 4d16fbb906d367e6d1823b67ae0e25664073e821 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 17:39:20 -0800 Subject: [PATCH 030/819] Get rid of postgres-shared --- Cargo.toml | 1 - codegen/src/main.rs | 2 +- postgres-shared/Cargo.toml | 29 - postgres-shared/src/error/mod.rs | 448 ------- postgres-shared/src/error/sqlstate.rs | 1076 ----------------- postgres-shared/src/lib.rs | 32 - postgres-shared/src/params/mod.rs | 295 ----- postgres-shared/src/params/url.rs | 465 ------- tokio-postgres/Cargo.toml | 23 +- tokio-postgres/src/lib.rs | 34 +- tokio-postgres/src/proto/row.rs | 2 +- tokio-postgres/src/proto/statement.rs | 2 +- .../src/rows.rs | 0 .../src/stmt.rs | 0 .../src/types/bit_vec.rs | 0 .../src/types/chrono.rs | 0 .../src/types/eui48.rs | 0 .../src/types/geo.rs | 0 .../src/types/mod.rs | 0 .../src/types/serde_json.rs | 0 .../src/types/special.rs | 0 .../src/types/type_gen.rs | 0 .../src/types/uuid.rs | 0 23 files changed, 43 insertions(+), 2366 deletions(-) delete mode 100644 postgres-shared/Cargo.toml delete mode 100644 postgres-shared/src/error/mod.rs delete mode 100644 postgres-shared/src/error/sqlstate.rs delete mode 100644 postgres-shared/src/lib.rs delete mode 100644 postgres-shared/src/params/mod.rs delete mode 100644 postgres-shared/src/params/url.rs rename {postgres-shared => tokio-postgres}/src/rows.rs (100%) rename {postgres-shared => tokio-postgres}/src/stmt.rs (100%) rename {postgres-shared => tokio-postgres}/src/types/bit_vec.rs (100%) rename {postgres-shared => tokio-postgres}/src/types/chrono.rs (100%) rename {postgres-shared => tokio-postgres}/src/types/eui48.rs (100%) rename {postgres-shared => tokio-postgres}/src/types/geo.rs (100%) rename {postgres-shared => tokio-postgres}/src/types/mod.rs (100%) rename {postgres-shared => tokio-postgres}/src/types/serde_json.rs (100%) rename {postgres-shared => tokio-postgres}/src/types/special.rs (100%) rename {postgres-shared => 
tokio-postgres}/src/types/type_gen.rs (100%) rename {postgres-shared => tokio-postgres}/src/types/uuid.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 2424197c9..86e7becf2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,6 @@ members = [ "codegen", "postgres-protocol", - "postgres-shared", "tokio-postgres", "tokio-postgres-native-tls", "tokio-postgres-openssl", diff --git a/codegen/src/main.rs b/codegen/src/main.rs index 9aa9a9744..70e734c7f 100644 --- a/codegen/src/main.rs +++ b/codegen/src/main.rs @@ -9,7 +9,7 @@ mod sqlstate; mod type_gen; fn main() { - let path = Path::new("../postgres-shared/src"); + let path = Path::new("../tokio-postgres/src"); sqlstate::build(path); type_gen::build(path); } diff --git a/postgres-shared/Cargo.toml b/postgres-shared/Cargo.toml deleted file mode 100644 index 2bdeb1712..000000000 --- a/postgres-shared/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "postgres-shared" -version = "0.4.1" -authors = ["Steven Fackler "] -license = "MIT" -description = "Internal crate used by postgres and postgres-tokio" -repository = "https://github.com/sfackler/rust-postgres" - -[features] -"with-bit-vec-0.5" = ["bit-vec"] -"with-chrono-0.4" = ["chrono"] -"with-eui48-0.3" = ["eui48"] -"with-geo-0.10" = ["geo"] -with-serde_json-1 = ["serde", "serde_json"] -"with-uuid-0.6" = ["uuid"] - -[dependencies] -hex = "0.3" -fallible-iterator = "0.1.3" -phf = "0.7.22" -postgres-protocol = { version = "0.3", path = "../postgres-protocol" } - -bit-vec = { version = "0.5", optional = true } -chrono = { version = "0.4", optional = true } -eui48 = { version = "0.3", optional = true } -geo = { version = "0.10", optional = true } -serde = { version = "1.0", optional = true } -serde_json = { version = "1.0", optional = true } -uuid = { version = "0.6", optional = true } diff --git a/postgres-shared/src/error/mod.rs b/postgres-shared/src/error/mod.rs deleted file mode 100644 index c437219cb..000000000 --- a/postgres-shared/src/error/mod.rs +++ /dev/null @@ -1,448 +0,0 @@ -//! Errors. - -use fallible_iterator::FallibleIterator; -use postgres_protocol::message::backend::{ErrorFields, ErrorResponseBody}; -use std::convert::From; -use std::error; -use std::fmt; -use std::io; - -pub use self::sqlstate::*; - -mod sqlstate; - -/// The severity of a Postgres error or notice. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum Severity { - /// PANIC - Panic, - /// FATAL - Fatal, - /// ERROR - Error, - /// WARNING - Warning, - /// NOTICE - Notice, - /// DEBUG - Debug, - /// INFO - Info, - /// LOG - Log, -} - -impl fmt::Display for Severity { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let s = match *self { - Severity::Panic => "PANIC", - Severity::Fatal => "FATAL", - Severity::Error => "ERROR", - Severity::Warning => "WARNING", - Severity::Notice => "NOTICE", - Severity::Debug => "DEBUG", - Severity::Info => "INFO", - Severity::Log => "LOG", - }; - fmt.write_str(s) - } -} - -impl Severity { - fn from_str(s: &str) -> Option { - match s { - "PANIC" => Some(Severity::Panic), - "FATAL" => Some(Severity::Fatal), - "ERROR" => Some(Severity::Error), - "WARNING" => Some(Severity::Warning), - "NOTICE" => Some(Severity::Notice), - "DEBUG" => Some(Severity::Debug), - "INFO" => Some(Severity::Info), - "LOG" => Some(Severity::Log), - _ => None, - } - } -} - -/// A Postgres error or notice. 
-#[derive(Clone, PartialEq, Eq)] -pub struct DbError { - /// The field contents are ERROR, FATAL, or PANIC (in an error message), - /// or WARNING, NOTICE, DEBUG, INFO, or LOG (in a notice message), or a - /// localized translation of one of these. - pub severity: String, - - /// A parsed, nonlocalized version of `severity`. (PostgreSQL 9.6+) - pub parsed_severity: Option, - - /// The SQLSTATE code for the error. - pub code: SqlState, - - /// The primary human-readable error message. This should be accurate but - /// terse (typically one line). - pub message: String, - - /// An optional secondary error message carrying more detail about the - /// problem. Might run to multiple lines. - pub detail: Option, - - /// An optional suggestion what to do about the problem. This is intended - /// to differ from Detail in that it offers advice (potentially - /// inappropriate) rather than hard facts. Might run to multiple lines. - pub hint: Option, - - /// An optional error cursor position into either the original query string - /// or an internally generated query. - pub position: Option, - - /// An indication of the context in which the error occurred. Presently - /// this includes a call stack traceback of active procedural language - /// functions and internally-generated queries. The trace is one entry per - /// line, most recent first. - pub where_: Option, - - /// If the error was associated with a specific database object, the name - /// of the schema containing that object, if any. (PostgreSQL 9.3+) - pub schema: Option, - - /// If the error was associated with a specific table, the name of the - /// table. (Refer to the schema name field for the name of the table's - /// schema.) (PostgreSQL 9.3+) - pub table: Option, - - /// If the error was associated with a specific table column, the name of - /// the column. (Refer to the schema and table name fields to identify the - /// table.) (PostgreSQL 9.3+) - pub column: Option, - - /// If the error was associated with a specific data type, the name of the - /// data type. (Refer to the schema name field for the name of the data - /// type's schema.) (PostgreSQL 9.3+) - pub datatype: Option, - - /// If the error was associated with a specific constraint, the name of the - /// constraint. Refer to fields listed above for the associated table or - /// domain. (For this purpose, indexes are treated as constraints, even if - /// they weren't created with constraint syntax.) (PostgreSQL 9.3+) - pub constraint: Option, - - /// The file name of the source-code location where the error was reported. - pub file: Option, - - /// The line number of the source-code location where the error was - /// reported. - pub line: Option, - - /// The name of the source-code routine reporting the error. - pub routine: Option, - - _p: (), -} - -impl DbError { - #[doc(hidden)] - pub fn new(fields: &mut ErrorFields) -> io::Result { - let mut severity = None; - let mut parsed_severity = None; - let mut code = None; - let mut message = None; - let mut detail = None; - let mut hint = None; - let mut normal_position = None; - let mut internal_position = None; - let mut internal_query = None; - let mut where_ = None; - let mut schema = None; - let mut table = None; - let mut column = None; - let mut datatype = None; - let mut constraint = None; - let mut file = None; - let mut line = None; - let mut routine = None; - - while let Some(field) = fields.next()? 
{ - match field.type_() { - b'S' => severity = Some(field.value().to_owned()), - b'C' => code = Some(SqlState::from_code(field.value())), - b'M' => message = Some(field.value().to_owned()), - b'D' => detail = Some(field.value().to_owned()), - b'H' => hint = Some(field.value().to_owned()), - b'P' => { - normal_position = Some(field.value().parse::().map_err(|_| { - io::Error::new( - io::ErrorKind::InvalidInput, - "`P` field did not contain an integer", - ) - })?); - } - b'p' => { - internal_position = Some(field.value().parse::().map_err(|_| { - io::Error::new( - io::ErrorKind::InvalidInput, - "`p` field did not contain an integer", - ) - })?); - } - b'q' => internal_query = Some(field.value().to_owned()), - b'W' => where_ = Some(field.value().to_owned()), - b's' => schema = Some(field.value().to_owned()), - b't' => table = Some(field.value().to_owned()), - b'c' => column = Some(field.value().to_owned()), - b'd' => datatype = Some(field.value().to_owned()), - b'n' => constraint = Some(field.value().to_owned()), - b'F' => file = Some(field.value().to_owned()), - b'L' => { - line = Some(field.value().parse::().map_err(|_| { - io::Error::new( - io::ErrorKind::InvalidInput, - "`L` field did not contain an integer", - ) - })?); - } - b'R' => routine = Some(field.value().to_owned()), - b'V' => { - parsed_severity = Some(Severity::from_str(field.value()).ok_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "`V` field contained an invalid value", - ) - })?); - } - _ => {} - } - } - - Ok(DbError { - severity: severity - .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`S` field missing"))?, - parsed_severity: parsed_severity, - code: code - .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`C` field missing"))?, - message: message - .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`M` field missing"))?, - detail: detail, - hint: hint, - position: match normal_position { - Some(position) => Some(ErrorPosition::Normal(position)), - None => match internal_position { - Some(position) => Some(ErrorPosition::Internal { - position: position, - query: internal_query.ok_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "`q` field missing but `p` field present", - ) - })?, - }), - None => None, - }, - }, - where_: where_, - schema: schema, - table: table, - column: column, - datatype: datatype, - constraint: constraint, - file: file, - line: line, - routine: routine, - _p: (), - }) - } -} - -// manual impl to leave out _p -impl fmt::Debug for DbError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("DbError") - .field("severity", &self.severity) - .field("parsed_severity", &self.parsed_severity) - .field("code", &self.code) - .field("message", &self.message) - .field("detail", &self.detail) - .field("hint", &self.hint) - .field("position", &self.position) - .field("where_", &self.where_) - .field("schema", &self.schema) - .field("table", &self.table) - .field("column", &self.column) - .field("datatype", &self.datatype) - .field("constraint", &self.constraint) - .field("file", &self.file) - .field("line", &self.line) - .field("routine", &self.routine) - .finish() - } -} - -impl fmt::Display for DbError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{}: {}", self.severity, self.message) - } -} - -impl error::Error for DbError { - fn description(&self) -> &str { - &self.message - } -} - -/// Represents the position of an error in a query. 
-#[derive(Clone, PartialEq, Eq, Debug)] -pub enum ErrorPosition { - /// A position in the original query. - Normal(u32), - /// A position in an internally generated query. - Internal { - /// The byte position. - position: u32, - /// A query generated by the Postgres server. - query: String, - }, -} - -#[doc(hidden)] -pub fn connect(e: Box) -> Error { - Error(Box::new(ErrorKind::ConnectParams(e))) -} - -#[doc(hidden)] -pub fn tls(e: Box) -> Error { - Error(Box::new(ErrorKind::Tls(e))) -} - -#[doc(hidden)] -pub fn db(e: DbError) -> Error { - Error(Box::new(ErrorKind::Db(e))) -} - -#[doc(hidden)] -pub fn __db(e: ErrorResponseBody) -> Error { - match DbError::new(&mut e.fields()) { - Ok(e) => Error(Box::new(ErrorKind::Db(e))), - Err(e) => Error(Box::new(ErrorKind::Io(e))), - } -} - -#[doc(hidden)] -pub fn __user(e: T) -> Error -where - T: Into>, -{ - Error(Box::new(ErrorKind::Conversion(e.into()))) -} - -#[doc(hidden)] -pub fn io(e: io::Error) -> Error { - Error(Box::new(ErrorKind::Io(e))) -} - -#[doc(hidden)] -pub fn conversion(e: Box) -> Error { - Error(Box::new(ErrorKind::Conversion(e))) -} - -#[derive(Debug)] -enum ErrorKind { - ConnectParams(Box), - Tls(Box), - Db(DbError), - Io(io::Error), - Conversion(Box), -} - -/// An error communicating with the Postgres server. -#[derive(Debug)] -pub struct Error(Box); - -impl fmt::Display for Error { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.write_str(error::Error::description(self))?; - match *self.0 { - ErrorKind::ConnectParams(ref err) => write!(fmt, ": {}", err), - ErrorKind::Tls(ref err) => write!(fmt, ": {}", err), - ErrorKind::Db(ref err) => write!(fmt, ": {}", err), - ErrorKind::Io(ref err) => write!(fmt, ": {}", err), - ErrorKind::Conversion(ref err) => write!(fmt, ": {}", err), - } - } -} - -impl error::Error for Error { - fn description(&self) -> &str { - match *self.0 { - ErrorKind::ConnectParams(_) => "invalid connection parameters", - ErrorKind::Tls(_) => "TLS handshake error", - ErrorKind::Db(_) => "database error", - ErrorKind::Io(_) => "IO error", - ErrorKind::Conversion(_) => "type conversion error", - } - } - - fn cause(&self) -> Option<&error::Error> { - match *self.0 { - ErrorKind::ConnectParams(ref err) => Some(&**err), - ErrorKind::Tls(ref err) => Some(&**err), - ErrorKind::Db(ref err) => Some(err), - ErrorKind::Io(ref err) => Some(err), - ErrorKind::Conversion(ref err) => Some(&**err), - } - } -} - -impl Error { - /// Returns the SQLSTATE error code associated with this error if it is a DB - /// error. - pub fn code(&self) -> Option<&SqlState> { - self.as_db().map(|e| &e.code) - } - - /// Returns the inner error if this is a connection parameter error. - pub fn as_connection(&self) -> Option<&(error::Error + 'static + Sync + Send)> { - match *self.0 { - ErrorKind::ConnectParams(ref err) => Some(&**err), - _ => None, - } - } - - /// Returns the `DbError` associated with this error if it is a DB error. - pub fn as_db(&self) -> Option<&DbError> { - match *self.0 { - ErrorKind::Db(ref err) => Some(err), - _ => None, - } - } - - /// Returns the inner error if this is a conversion error. - pub fn as_conversion(&self) -> Option<&(error::Error + 'static + Sync + Send)> { - match *self.0 { - ErrorKind::Conversion(ref err) => Some(&**err), - _ => None, - } - } - - /// Returns the inner `io::Error` associated with this error if it is an IO - /// error. 
- pub fn as_io(&self) -> Option<&io::Error> { - match *self.0 { - ErrorKind::Io(ref err) => Some(err), - _ => None, - } - } -} - -impl From for Error { - fn from(err: io::Error) -> Error { - Error(Box::new(ErrorKind::Io(err))) - } -} - -impl From for io::Error { - fn from(err: Error) -> io::Error { - match *err.0 { - ErrorKind::Io(e) => e, - _ => io::Error::new(io::ErrorKind::Other, err), - } - } -} diff --git a/postgres-shared/src/error/sqlstate.rs b/postgres-shared/src/error/sqlstate.rs deleted file mode 100644 index 7dddfc7e2..000000000 --- a/postgres-shared/src/error/sqlstate.rs +++ /dev/null @@ -1,1076 +0,0 @@ -// Autogenerated file - DO NOT EDIT -use phf; -use std::borrow::Cow; - -/// A SQLSTATE error code -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct SqlState(Cow<'static, str>); - -impl SqlState { - /// Creates a `SqlState` from its error code. - pub fn from_code(s: &str) -> SqlState { - match SQLSTATE_MAP.get(s) { - Some(state) => state.clone(), - None => SqlState(Cow::Owned(s.to_string())), - } - } - - /// Returns the error code corresponding to the `SqlState`. - pub fn code(&self) -> &str { - &self.0 - } - - /// 00000 - pub const SUCCESSFUL_COMPLETION: SqlState = SqlState(Cow::Borrowed("00000")); - - /// 01000 - pub const WARNING: SqlState = SqlState(Cow::Borrowed("01000")); - - /// 0100C - pub const WARNING_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Cow::Borrowed("0100C")); - - /// 01008 - pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Cow::Borrowed("01008")); - - /// 01003 - pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = - SqlState(Cow::Borrowed("01003")); - - /// 01007 - pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Cow::Borrowed("01007")); - - /// 01006 - pub const WARNING_PRIVILEGE_NOT_REVOKED: SqlState = SqlState(Cow::Borrowed("01006")); - - /// 01004 - pub const WARNING_STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Cow::Borrowed("01004")); - - /// 01P01 - pub const WARNING_DEPRECATED_FEATURE: SqlState = SqlState(Cow::Borrowed("01P01")); - - /// 02000 - pub const NO_DATA: SqlState = SqlState(Cow::Borrowed("02000")); - - /// 02001 - pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = - SqlState(Cow::Borrowed("02001")); - - /// 03000 - pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Cow::Borrowed("03000")); - - /// 08000 - pub const CONNECTION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("08000")); - - /// 08003 - pub const CONNECTION_DOES_NOT_EXIST: SqlState = SqlState(Cow::Borrowed("08003")); - - /// 08006 - pub const CONNECTION_FAILURE: SqlState = SqlState(Cow::Borrowed("08006")); - - /// 08001 - pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = - SqlState(Cow::Borrowed("08001")); - - /// 08004 - pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = - SqlState(Cow::Borrowed("08004")); - - /// 08007 - pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("08007")); - - /// 08P01 - pub const PROTOCOL_VIOLATION: SqlState = SqlState(Cow::Borrowed("08P01")); - - /// 09000 - pub const TRIGGERED_ACTION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("09000")); - - /// 0A000 - pub const FEATURE_NOT_SUPPORTED: SqlState = SqlState(Cow::Borrowed("0A000")); - - /// 0B000 - pub const INVALID_TRANSACTION_INITIATION: SqlState = SqlState(Cow::Borrowed("0B000")); - - /// 0F000 - pub const LOCATOR_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0F000")); - - /// 0F001 - pub const L_E_INVALID_SPECIFICATION: SqlState = 
SqlState(Cow::Borrowed("0F001")); - - /// 0L000 - pub const INVALID_GRANTOR: SqlState = SqlState(Cow::Borrowed("0L000")); - - /// 0LP01 - pub const INVALID_GRANT_OPERATION: SqlState = SqlState(Cow::Borrowed("0LP01")); - - /// 0P000 - pub const INVALID_ROLE_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("0P000")); - - /// 0Z000 - pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0Z000")); - - /// 0Z002 - pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = - SqlState(Cow::Borrowed("0Z002")); - - /// 20000 - pub const CASE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("20000")); - - /// 21000 - pub const CARDINALITY_VIOLATION: SqlState = SqlState(Cow::Borrowed("21000")); - - /// 22000 - pub const DATA_EXCEPTION: SqlState = SqlState(Cow::Borrowed("22000")); - - /// 2202E - pub const ARRAY_ELEMENT_ERROR: SqlState = SqlState(Cow::Borrowed("2202E")); - - /// 2202E - pub const ARRAY_SUBSCRIPT_ERROR: SqlState = SqlState(Cow::Borrowed("2202E")); - - /// 22021 - pub const CHARACTER_NOT_IN_REPERTOIRE: SqlState = SqlState(Cow::Borrowed("22021")); - - /// 22008 - pub const DATETIME_FIELD_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22008")); - - /// 22008 - pub const DATETIME_VALUE_OUT_OF_RANGE: SqlState = SqlState(Cow::Borrowed("22008")); - - /// 22012 - pub const DIVISION_BY_ZERO: SqlState = SqlState(Cow::Borrowed("22012")); - - /// 22005 - pub const ERROR_IN_ASSIGNMENT: SqlState = SqlState(Cow::Borrowed("22005")); - - /// 2200B - pub const ESCAPE_CHARACTER_CONFLICT: SqlState = SqlState(Cow::Borrowed("2200B")); - - /// 22022 - pub const INDICATOR_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22022")); - - /// 22015 - pub const INTERVAL_FIELD_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22015")); - - /// 2201E - pub const INVALID_ARGUMENT_FOR_LOG: SqlState = SqlState(Cow::Borrowed("2201E")); - - /// 22014 - pub const INVALID_ARGUMENT_FOR_NTILE: SqlState = SqlState(Cow::Borrowed("22014")); - - /// 22016 - pub const INVALID_ARGUMENT_FOR_NTH_VALUE: SqlState = SqlState(Cow::Borrowed("22016")); - - /// 2201F - pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201F")); - - /// 2201G - pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = - SqlState(Cow::Borrowed("2201G")); - - /// 22018 - pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Cow::Borrowed("22018")); - - /// 22007 - pub const INVALID_DATETIME_FORMAT: SqlState = SqlState(Cow::Borrowed("22007")); - - /// 22019 - pub const INVALID_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22019")); - - /// 2200D - pub const INVALID_ESCAPE_OCTET: SqlState = SqlState(Cow::Borrowed("2200D")); - - /// 22025 - pub const INVALID_ESCAPE_SEQUENCE: SqlState = SqlState(Cow::Borrowed("22025")); - - /// 22P06 - pub const NONSTANDARD_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22P06")); - - /// 22010 - pub const INVALID_INDICATOR_PARAMETER_VALUE: SqlState = SqlState(Cow::Borrowed("22010")); - - /// 22023 - pub const INVALID_PARAMETER_VALUE: SqlState = SqlState(Cow::Borrowed("22023")); - - /// 2201B - pub const INVALID_REGULAR_EXPRESSION: SqlState = SqlState(Cow::Borrowed("2201B")); - - /// 2201W - pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201W")); - - /// 2201X - pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = - SqlState(Cow::Borrowed("2201X")); - - /// 2202H - pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = SqlState(Cow::Borrowed("2202H")); - - /// 2202G - pub const INVALID_TABLESAMPLE_REPEAT: 
SqlState = SqlState(Cow::Borrowed("2202G")); - - /// 22009 - pub const INVALID_TIME_ZONE_DISPLACEMENT_VALUE: SqlState = SqlState(Cow::Borrowed("22009")); - - /// 2200C - pub const INVALID_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("2200C")); - - /// 2200G - pub const MOST_SPECIFIC_TYPE_MISMATCH: SqlState = SqlState(Cow::Borrowed("2200G")); - - /// 22004 - pub const NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Cow::Borrowed("22004")); - - /// 22002 - pub const NULL_VALUE_NO_INDICATOR_PARAMETER: SqlState = SqlState(Cow::Borrowed("22002")); - - /// 22003 - pub const NUMERIC_VALUE_OUT_OF_RANGE: SqlState = SqlState(Cow::Borrowed("22003")); - - /// 2200H - pub const SEQUENCE_GENERATOR_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("2200H")); - - /// 22026 - pub const STRING_DATA_LENGTH_MISMATCH: SqlState = SqlState(Cow::Borrowed("22026")); - - /// 22001 - pub const STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Cow::Borrowed("22001")); - - /// 22011 - pub const SUBSTRING_ERROR: SqlState = SqlState(Cow::Borrowed("22011")); - - /// 22027 - pub const TRIM_ERROR: SqlState = SqlState(Cow::Borrowed("22027")); - - /// 22024 - pub const UNTERMINATED_C_STRING: SqlState = SqlState(Cow::Borrowed("22024")); - - /// 2200F - pub const ZERO_LENGTH_CHARACTER_STRING: SqlState = SqlState(Cow::Borrowed("2200F")); - - /// 22P01 - pub const FLOATING_POINT_EXCEPTION: SqlState = SqlState(Cow::Borrowed("22P01")); - - /// 22P02 - pub const INVALID_TEXT_REPRESENTATION: SqlState = SqlState(Cow::Borrowed("22P02")); - - /// 22P03 - pub const INVALID_BINARY_REPRESENTATION: SqlState = SqlState(Cow::Borrowed("22P03")); - - /// 22P04 - pub const BAD_COPY_FILE_FORMAT: SqlState = SqlState(Cow::Borrowed("22P04")); - - /// 22P05 - pub const UNTRANSLATABLE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22P05")); - - /// 2200L - pub const NOT_AN_XML_DOCUMENT: SqlState = SqlState(Cow::Borrowed("2200L")); - - /// 2200M - pub const INVALID_XML_DOCUMENT: SqlState = SqlState(Cow::Borrowed("2200M")); - - /// 2200N - pub const INVALID_XML_CONTENT: SqlState = SqlState(Cow::Borrowed("2200N")); - - /// 2200S - pub const INVALID_XML_COMMENT: SqlState = SqlState(Cow::Borrowed("2200S")); - - /// 2200T - pub const INVALID_XML_PROCESSING_INSTRUCTION: SqlState = SqlState(Cow::Borrowed("2200T")); - - /// 23000 - pub const INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Cow::Borrowed("23000")); - - /// 23001 - pub const RESTRICT_VIOLATION: SqlState = SqlState(Cow::Borrowed("23001")); - - /// 23502 - pub const NOT_NULL_VIOLATION: SqlState = SqlState(Cow::Borrowed("23502")); - - /// 23503 - pub const FOREIGN_KEY_VIOLATION: SqlState = SqlState(Cow::Borrowed("23503")); - - /// 23505 - pub const UNIQUE_VIOLATION: SqlState = SqlState(Cow::Borrowed("23505")); - - /// 23514 - pub const CHECK_VIOLATION: SqlState = SqlState(Cow::Borrowed("23514")); - - /// 23P01 - pub const EXCLUSION_VIOLATION: SqlState = SqlState(Cow::Borrowed("23P01")); - - /// 24000 - pub const INVALID_CURSOR_STATE: SqlState = SqlState(Cow::Borrowed("24000")); - - /// 25000 - pub const INVALID_TRANSACTION_STATE: SqlState = SqlState(Cow::Borrowed("25000")); - - /// 25001 - pub const ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25001")); - - /// 25002 - pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Cow::Borrowed("25002")); - - /// 25008 - pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = - SqlState(Cow::Borrowed("25008")); - - /// 25003 - pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = - 
SqlState(Cow::Borrowed("25003")); - - /// 25004 - pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25004")); - - /// 25005 - pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25005")); - - /// 25006 - pub const READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25006")); - - /// 25007 - pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = - SqlState(Cow::Borrowed("25007")); - - /// 25P01 - pub const NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P01")); - - /// 25P02 - pub const IN_FAILED_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P02")); - - /// 25P03 - pub const IDLE_IN_TRANSACTION_SESSION_TIMEOUT: SqlState = SqlState(Cow::Borrowed("25P03")); - - /// 26000 - pub const INVALID_SQL_STATEMENT_NAME: SqlState = SqlState(Cow::Borrowed("26000")); - - /// 26000 - pub const UNDEFINED_PSTATEMENT: SqlState = SqlState(Cow::Borrowed("26000")); - - /// 27000 - pub const TRIGGERED_DATA_CHANGE_VIOLATION: SqlState = SqlState(Cow::Borrowed("27000")); - - /// 28000 - pub const INVALID_AUTHORIZATION_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("28000")); - - /// 28P01 - pub const INVALID_PASSWORD: SqlState = SqlState(Cow::Borrowed("28P01")); - - /// 2B000 - pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = - SqlState(Cow::Borrowed("2B000")); - - /// 2BP01 - pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2BP01")); - - /// 2D000 - pub const INVALID_TRANSACTION_TERMINATION: SqlState = SqlState(Cow::Borrowed("2D000")); - - /// 2F000 - pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("2F000")); - - /// 2F005 - pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = - SqlState(Cow::Borrowed("2F005")); - - /// 2F002 - pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F002")); - - /// 2F003 - pub const S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Cow::Borrowed("2F003")); - - /// 2F004 - pub const S_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F004")); - - /// 34000 - pub const INVALID_CURSOR_NAME: SqlState = SqlState(Cow::Borrowed("34000")); - - /// 34000 - pub const UNDEFINED_CURSOR: SqlState = SqlState(Cow::Borrowed("34000")); - - /// 38000 - pub const EXTERNAL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("38000")); - - /// 38001 - pub const E_R_E_CONTAINING_SQL_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38001")); - - /// 38002 - pub const E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38002")); - - /// 38003 - pub const E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Cow::Borrowed("38003")); - - /// 38004 - pub const E_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38004")); - - /// 39000 - pub const EXTERNAL_ROUTINE_INVOCATION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("39000")); - - /// 39001 - pub const E_R_I_E_INVALID_SQLSTATE_RETURNED: SqlState = SqlState(Cow::Borrowed("39001")); - - /// 39004 - pub const E_R_I_E_NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Cow::Borrowed("39004")); - - /// 39P01 - pub const E_R_I_E_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P01")); - - /// 39P02 - pub const E_R_I_E_SRF_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P02")); - - /// 39P03 - pub const E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P03")); - - /// 3B000 - pub const 
SAVEPOINT_EXCEPTION: SqlState = SqlState(Cow::Borrowed("3B000")); - - /// 3B001 - pub const S_E_INVALID_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("3B001")); - - /// 3D000 - pub const INVALID_CATALOG_NAME: SqlState = SqlState(Cow::Borrowed("3D000")); - - /// 3D000 - pub const UNDEFINED_DATABASE: SqlState = SqlState(Cow::Borrowed("3D000")); - - /// 3F000 - pub const INVALID_SCHEMA_NAME: SqlState = SqlState(Cow::Borrowed("3F000")); - - /// 3F000 - pub const UNDEFINED_SCHEMA: SqlState = SqlState(Cow::Borrowed("3F000")); - - /// 40000 - pub const TRANSACTION_ROLLBACK: SqlState = SqlState(Cow::Borrowed("40000")); - - /// 40002 - pub const T_R_INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Cow::Borrowed("40002")); - - /// 40001 - pub const T_R_SERIALIZATION_FAILURE: SqlState = SqlState(Cow::Borrowed("40001")); - - /// 40003 - pub const T_R_STATEMENT_COMPLETION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("40003")); - - /// 40P01 - pub const T_R_DEADLOCK_DETECTED: SqlState = SqlState(Cow::Borrowed("40P01")); - - /// 42000 - pub const SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION: SqlState = SqlState(Cow::Borrowed("42000")); - - /// 42601 - pub const SYNTAX_ERROR: SqlState = SqlState(Cow::Borrowed("42601")); - - /// 42501 - pub const INSUFFICIENT_PRIVILEGE: SqlState = SqlState(Cow::Borrowed("42501")); - - /// 42846 - pub const CANNOT_COERCE: SqlState = SqlState(Cow::Borrowed("42846")); - - /// 42803 - pub const GROUPING_ERROR: SqlState = SqlState(Cow::Borrowed("42803")); - - /// 42P20 - pub const WINDOWING_ERROR: SqlState = SqlState(Cow::Borrowed("42P20")); - - /// 42P19 - pub const INVALID_RECURSION: SqlState = SqlState(Cow::Borrowed("42P19")); - - /// 42830 - pub const INVALID_FOREIGN_KEY: SqlState = SqlState(Cow::Borrowed("42830")); - - /// 42602 - pub const INVALID_NAME: SqlState = SqlState(Cow::Borrowed("42602")); - - /// 42622 - pub const NAME_TOO_LONG: SqlState = SqlState(Cow::Borrowed("42622")); - - /// 42939 - pub const RESERVED_NAME: SqlState = SqlState(Cow::Borrowed("42939")); - - /// 42804 - pub const DATATYPE_MISMATCH: SqlState = SqlState(Cow::Borrowed("42804")); - - /// 42P18 - pub const INDETERMINATE_DATATYPE: SqlState = SqlState(Cow::Borrowed("42P18")); - - /// 42P21 - pub const COLLATION_MISMATCH: SqlState = SqlState(Cow::Borrowed("42P21")); - - /// 42P22 - pub const INDETERMINATE_COLLATION: SqlState = SqlState(Cow::Borrowed("42P22")); - - /// 42809 - pub const WRONG_OBJECT_TYPE: SqlState = SqlState(Cow::Borrowed("42809")); - - /// 428C9 - pub const GENERATED_ALWAYS: SqlState = SqlState(Cow::Borrowed("428C9")); - - /// 42703 - pub const UNDEFINED_COLUMN: SqlState = SqlState(Cow::Borrowed("42703")); - - /// 42883 - pub const UNDEFINED_FUNCTION: SqlState = SqlState(Cow::Borrowed("42883")); - - /// 42P01 - pub const UNDEFINED_TABLE: SqlState = SqlState(Cow::Borrowed("42P01")); - - /// 42P02 - pub const UNDEFINED_PARAMETER: SqlState = SqlState(Cow::Borrowed("42P02")); - - /// 42704 - pub const UNDEFINED_OBJECT: SqlState = SqlState(Cow::Borrowed("42704")); - - /// 42701 - pub const DUPLICATE_COLUMN: SqlState = SqlState(Cow::Borrowed("42701")); - - /// 42P03 - pub const DUPLICATE_CURSOR: SqlState = SqlState(Cow::Borrowed("42P03")); - - /// 42P04 - pub const DUPLICATE_DATABASE: SqlState = SqlState(Cow::Borrowed("42P04")); - - /// 42723 - pub const DUPLICATE_FUNCTION: SqlState = SqlState(Cow::Borrowed("42723")); - - /// 42P05 - pub const DUPLICATE_PSTATEMENT: SqlState = SqlState(Cow::Borrowed("42P05")); - - /// 42P06 - pub const DUPLICATE_SCHEMA: SqlState = 
SqlState(Cow::Borrowed("42P06")); - - /// 42P07 - pub const DUPLICATE_TABLE: SqlState = SqlState(Cow::Borrowed("42P07")); - - /// 42712 - pub const DUPLICATE_ALIAS: SqlState = SqlState(Cow::Borrowed("42712")); - - /// 42710 - pub const DUPLICATE_OBJECT: SqlState = SqlState(Cow::Borrowed("42710")); - - /// 42702 - pub const AMBIGUOUS_COLUMN: SqlState = SqlState(Cow::Borrowed("42702")); - - /// 42725 - pub const AMBIGUOUS_FUNCTION: SqlState = SqlState(Cow::Borrowed("42725")); - - /// 42P08 - pub const AMBIGUOUS_PARAMETER: SqlState = SqlState(Cow::Borrowed("42P08")); - - /// 42P09 - pub const AMBIGUOUS_ALIAS: SqlState = SqlState(Cow::Borrowed("42P09")); - - /// 42P10 - pub const INVALID_COLUMN_REFERENCE: SqlState = SqlState(Cow::Borrowed("42P10")); - - /// 42611 - pub const INVALID_COLUMN_DEFINITION: SqlState = SqlState(Cow::Borrowed("42611")); - - /// 42P11 - pub const INVALID_CURSOR_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P11")); - - /// 42P12 - pub const INVALID_DATABASE_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P12")); - - /// 42P13 - pub const INVALID_FUNCTION_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P13")); - - /// 42P14 - pub const INVALID_PSTATEMENT_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P14")); - - /// 42P15 - pub const INVALID_SCHEMA_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P15")); - - /// 42P16 - pub const INVALID_TABLE_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P16")); - - /// 42P17 - pub const INVALID_OBJECT_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P17")); - - /// 44000 - pub const WITH_CHECK_OPTION_VIOLATION: SqlState = SqlState(Cow::Borrowed("44000")); - - /// 53000 - pub const INSUFFICIENT_RESOURCES: SqlState = SqlState(Cow::Borrowed("53000")); - - /// 53100 - pub const DISK_FULL: SqlState = SqlState(Cow::Borrowed("53100")); - - /// 53200 - pub const OUT_OF_MEMORY: SqlState = SqlState(Cow::Borrowed("53200")); - - /// 53300 - pub const TOO_MANY_CONNECTIONS: SqlState = SqlState(Cow::Borrowed("53300")); - - /// 53400 - pub const CONFIGURATION_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("53400")); - - /// 54000 - pub const PROGRAM_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("54000")); - - /// 54001 - pub const STATEMENT_TOO_COMPLEX: SqlState = SqlState(Cow::Borrowed("54001")); - - /// 54011 - pub const TOO_MANY_COLUMNS: SqlState = SqlState(Cow::Borrowed("54011")); - - /// 54023 - pub const TOO_MANY_ARGUMENTS: SqlState = SqlState(Cow::Borrowed("54023")); - - /// 55000 - pub const OBJECT_NOT_IN_PREREQUISITE_STATE: SqlState = SqlState(Cow::Borrowed("55000")); - - /// 55006 - pub const OBJECT_IN_USE: SqlState = SqlState(Cow::Borrowed("55006")); - - /// 55P02 - pub const CANT_CHANGE_RUNTIME_PARAM: SqlState = SqlState(Cow::Borrowed("55P02")); - - /// 55P03 - pub const LOCK_NOT_AVAILABLE: SqlState = SqlState(Cow::Borrowed("55P03")); - - /// 55P04 - pub const UNSAFE_NEW_ENUM_VALUE_USAGE: SqlState = SqlState(Cow::Borrowed("55P04")); - - /// 57000 - pub const OPERATOR_INTERVENTION: SqlState = SqlState(Cow::Borrowed("57000")); - - /// 57014 - pub const QUERY_CANCELED: SqlState = SqlState(Cow::Borrowed("57014")); - - /// 57P01 - pub const ADMIN_SHUTDOWN: SqlState = SqlState(Cow::Borrowed("57P01")); - - /// 57P02 - pub const CRASH_SHUTDOWN: SqlState = SqlState(Cow::Borrowed("57P02")); - - /// 57P03 - pub const CANNOT_CONNECT_NOW: SqlState = SqlState(Cow::Borrowed("57P03")); - - /// 57P04 - pub const DATABASE_DROPPED: SqlState = SqlState(Cow::Borrowed("57P04")); - - /// 58000 - pub const SYSTEM_ERROR: SqlState = 
SqlState(Cow::Borrowed("58000")); - - /// 58030 - pub const IO_ERROR: SqlState = SqlState(Cow::Borrowed("58030")); - - /// 58P01 - pub const UNDEFINED_FILE: SqlState = SqlState(Cow::Borrowed("58P01")); - - /// 58P02 - pub const DUPLICATE_FILE: SqlState = SqlState(Cow::Borrowed("58P02")); - - /// 72000 - pub const SNAPSHOT_TOO_OLD: SqlState = SqlState(Cow::Borrowed("72000")); - - /// F0000 - pub const CONFIG_FILE_ERROR: SqlState = SqlState(Cow::Borrowed("F0000")); - - /// F0001 - pub const LOCK_FILE_EXISTS: SqlState = SqlState(Cow::Borrowed("F0001")); - - /// HV000 - pub const FDW_ERROR: SqlState = SqlState(Cow::Borrowed("HV000")); - - /// HV005 - pub const FDW_COLUMN_NAME_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV005")); - - /// HV002 - pub const FDW_DYNAMIC_PARAMETER_VALUE_NEEDED: SqlState = SqlState(Cow::Borrowed("HV002")); - - /// HV010 - pub const FDW_FUNCTION_SEQUENCE_ERROR: SqlState = SqlState(Cow::Borrowed("HV010")); - - /// HV021 - pub const FDW_INCONSISTENT_DESCRIPTOR_INFORMATION: SqlState = SqlState(Cow::Borrowed("HV021")); - - /// HV024 - pub const FDW_INVALID_ATTRIBUTE_VALUE: SqlState = SqlState(Cow::Borrowed("HV024")); - - /// HV007 - pub const FDW_INVALID_COLUMN_NAME: SqlState = SqlState(Cow::Borrowed("HV007")); - - /// HV008 - pub const FDW_INVALID_COLUMN_NUMBER: SqlState = SqlState(Cow::Borrowed("HV008")); - - /// HV004 - pub const FDW_INVALID_DATA_TYPE: SqlState = SqlState(Cow::Borrowed("HV004")); - - /// HV006 - pub const FDW_INVALID_DATA_TYPE_DESCRIPTORS: SqlState = SqlState(Cow::Borrowed("HV006")); - - /// HV091 - pub const FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER: SqlState = SqlState(Cow::Borrowed("HV091")); - - /// HV00B - pub const FDW_INVALID_HANDLE: SqlState = SqlState(Cow::Borrowed("HV00B")); - - /// HV00C - pub const FDW_INVALID_OPTION_INDEX: SqlState = SqlState(Cow::Borrowed("HV00C")); - - /// HV00D - pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Cow::Borrowed("HV00D")); - - /// HV090 - pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = - SqlState(Cow::Borrowed("HV090")); - - /// HV00A - pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Cow::Borrowed("HV00A")); - - /// HV009 - pub const FDW_INVALID_USE_OF_NULL_POINTER: SqlState = SqlState(Cow::Borrowed("HV009")); - - /// HV014 - pub const FDW_TOO_MANY_HANDLES: SqlState = SqlState(Cow::Borrowed("HV014")); - - /// HV001 - pub const FDW_OUT_OF_MEMORY: SqlState = SqlState(Cow::Borrowed("HV001")); - - /// HV00P - pub const FDW_NO_SCHEMAS: SqlState = SqlState(Cow::Borrowed("HV00P")); - - /// HV00J - pub const FDW_OPTION_NAME_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00J")); - - /// HV00K - pub const FDW_REPLY_HANDLE: SqlState = SqlState(Cow::Borrowed("HV00K")); - - /// HV00Q - pub const FDW_SCHEMA_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00Q")); - - /// HV00R - pub const FDW_TABLE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00R")); - - /// HV00L - pub const FDW_UNABLE_TO_CREATE_EXECUTION: SqlState = SqlState(Cow::Borrowed("HV00L")); - - /// HV00M - pub const FDW_UNABLE_TO_CREATE_REPLY: SqlState = SqlState(Cow::Borrowed("HV00M")); - - /// HV00N - pub const FDW_UNABLE_TO_ESTABLISH_CONNECTION: SqlState = SqlState(Cow::Borrowed("HV00N")); - - /// P0000 - pub const PLPGSQL_ERROR: SqlState = SqlState(Cow::Borrowed("P0000")); - - /// P0001 - pub const RAISE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("P0001")); - - /// P0002 - pub const NO_DATA_FOUND: SqlState = SqlState(Cow::Borrowed("P0002")); - - /// P0003 - pub const TOO_MANY_ROWS: SqlState = 
SqlState(Cow::Borrowed("P0003")); - - /// P0004 - pub const ASSERT_FAILURE: SqlState = SqlState(Cow::Borrowed("P0004")); - - /// XX000 - pub const INTERNAL_ERROR: SqlState = SqlState(Cow::Borrowed("XX000")); - - /// XX001 - pub const DATA_CORRUPTED: SqlState = SqlState(Cow::Borrowed("XX001")); - - /// XX002 - pub const INDEX_CORRUPTED: SqlState = SqlState(Cow::Borrowed("XX002")); -} -#[cfg_attr(rustfmt, rustfmt_skip)] -static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { - key: 1897749892740154578, - disps: ::phf::Slice::Static(&[ - (1, 99), - (0, 0), - (1, 5), - (0, 3), - (0, 110), - (0, 54), - (0, 3), - (0, 13), - (0, 0), - (0, 24), - (0, 214), - (0, 52), - (1, 34), - (0, 33), - (0, 44), - (0, 130), - (0, 16), - (0, 187), - (0, 3), - (13, 168), - (0, 4), - (0, 19), - (0, 13), - (0, 87), - (0, 0), - (0, 108), - (0, 123), - (7, 181), - (0, 109), - (0, 32), - (0, 0), - (1, 69), - (1, 81), - (1, 219), - (0, 157), - (2, 41), - (8, 141), - (0, 5), - (0, 0), - (1, 6), - (0, 3), - (1, 146), - (1, 227), - (9, 94), - (10, 158), - (29, 65), - (3, 2), - (0, 33), - (1, 94), - ]), - entries: ::phf::Slice::Static(&[ - ("23001", SqlState::RESTRICT_VIOLATION), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("P0000", SqlState::PLPGSQL_ERROR), - ("58000", SqlState::SYSTEM_ERROR), - ("57P01", SqlState::ADMIN_SHUTDOWN), - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("2202E", SqlState::ARRAY_ELEMENT_ERROR), - ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), - ("20000", SqlState::CASE_NOT_FOUND), - ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("08006", SqlState::CONNECTION_FAILURE), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), - ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("2200N", SqlState::INVALID_XML_CONTENT), - ("57014", SqlState::QUERY_CANCELED), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), - ("01000", SqlState::WARNING), - ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), - ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("42846", SqlState::CANNOT_COERCE), - ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), - ("XX000", SqlState::INTERNAL_ERROR), - ("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), - ("54011", SqlState::TOO_MANY_COLUMNS), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), - ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("42704", SqlState::UNDEFINED_OBJECT), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("0L000", SqlState::INVALID_GRANTOR), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("42P08", 
SqlState::AMBIGUOUS_PARAMETER), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("42939", SqlState::RESERVED_NAME), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), - ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("42P19", SqlState::INVALID_RECURSION), - ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("58P02", SqlState::DUPLICATE_FILE), - ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("P0002", SqlState::NO_DATA_FOUND), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("22027", SqlState::TRIM_ERROR), - ("54001", SqlState::STATEMENT_TOO_COMPLEX), - ("42602", SqlState::INVALID_NAME), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("22000", SqlState::DATA_EXCEPTION), - ("28P01", SqlState::INVALID_PASSWORD), - ("23514", SqlState::CHECK_VIOLATION), - ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), - ("57P02", SqlState::CRASH_SHUTDOWN), - ("42P03", SqlState::DUPLICATE_CURSOR), - ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("42701", SqlState::DUPLICATE_COLUMN), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), - ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("P0001", SqlState::RAISE_EXCEPTION), - ("08P01", SqlState::PROTOCOL_VIOLATION), - ("42723", SqlState::DUPLICATE_FUNCTION), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), - ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("42712", SqlState::DUPLICATE_ALIAS), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("XX002", SqlState::INDEX_CORRUPTED), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), - ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("22012", SqlState::DIVISION_BY_ZERO), - ("XX001", SqlState::DATA_CORRUPTED), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), - ("42P01", SqlState::UNDEFINED_TABLE), - ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("P0004", SqlState::ASSERT_FAILURE), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), - ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), - ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("F0000", SqlState::CONFIG_FILE_ERROR), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("2200S", SqlState::INVALID_XML_COMMENT), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("42703", SqlState::UNDEFINED_COLUMN), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), - ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), - ("25008", 
SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("22023", SqlState::INVALID_PARAMETER_VALUE), - ("22011", SqlState::SUBSTRING_ERROR), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("42803", SqlState::GROUPING_ERROR), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("42P16", SqlState::INVALID_TABLE_DEFINITION), - ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("42601", SqlState::SYNTAX_ERROR), - ("42622", SqlState::NAME_TOO_LONG), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("42P21", SqlState::COLLATION_MISMATCH), - ("23505", SqlState::UNIQUE_VIOLATION), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), - ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("58P01", SqlState::UNDEFINED_FILE), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("23P01", SqlState::EXCLUSION_VIOLATION), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), - ("58030", SqlState::IO_ERROR), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("42710", SqlState::DUPLICATE_OBJECT), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("42P18", SqlState::INDETERMINATE_DATATYPE), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - ("42804", SqlState::DATATYPE_MISMATCH), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("42P22", SqlState::INDETERMINATE_COLLATION), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("42P07", SqlState::DUPLICATE_TABLE), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("23502", SqlState::NOT_NULL_VIOLATION), - ("57000", SqlState::OPERATOR_INTERVENTION), - ("HV000", SqlState::FDW_ERROR), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), - ("P0003", SqlState::TOO_MANY_ROWS), - ("3D000", SqlState::INVALID_CATALOG_NAME), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("55006", SqlState::OBJECT_IN_USE), - ("53200", SqlState::OUT_OF_MEMORY), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("53100", SqlState::DISK_FULL), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), - ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("428C9", SqlState::GENERATED_ALWAYS), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("22022", SqlState::INDICATOR_OVERFLOW), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("22002", 
SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("02000", SqlState::NO_DATA), - ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), - ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("53000", SqlState::INSUFFICIENT_RESOURCES), - ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), - ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("42P20", SqlState::WINDOWING_ERROR), - ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("2200M", SqlState::INVALID_XML_DOCUMENT), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("00000", SqlState::SUCCESSFUL_COMPLETION), - ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), - ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("57P04", SqlState::DATABASE_DROPPED), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), - ("42P04", SqlState::DUPLICATE_DATABASE), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), - ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("42P12", SqlState::INVALID_DATABASE_DEFINITION), - ("57P03", SqlState::CANNOT_CONNECT_NOW), - ]), -}; diff --git a/postgres-shared/src/lib.rs b/postgres-shared/src/lib.rs deleted file mode 100644 index 84506f7db..000000000 --- a/postgres-shared/src/lib.rs +++ /dev/null @@ -1,32 +0,0 @@ -#![allow(unknown_lints)] // for clippy - -extern crate fallible_iterator; -extern crate hex; -extern crate phf; -extern crate postgres_protocol; - -pub mod error; -pub mod params; -pub mod rows; -pub mod stmt; -pub mod types; - -/// Contains information necessary to cancel queries for a session. -#[derive(Copy, Clone, Debug)] -pub struct CancelData { - /// The process ID of the session. - pub process_id: i32, - /// The secret key for the session. - pub secret_key: i32, -} - -/// An asynchronous notification. -#[derive(Clone, Debug)] -pub struct Notification { - /// The process ID of the notifying backend process. - pub process_id: i32, - /// The name of the channel that the notify has been raised on. - pub channel: String, - /// The "payload" string passed from the notifying process. 
- pub payload: String, -} diff --git a/postgres-shared/src/params/mod.rs b/postgres-shared/src/params/mod.rs deleted file mode 100644 index 296483f9d..000000000 --- a/postgres-shared/src/params/mod.rs +++ /dev/null @@ -1,295 +0,0 @@ -//! Connection parameters -use std::error::Error; -use std::mem; -use std::path::PathBuf; -use std::str::FromStr; -use std::time::Duration; - -use error; -use params::url::Url; - -mod url; - -/// The host. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum Host { - /// A TCP hostname. - Tcp(String), - /// The path to a directory containing the server's Unix socket. - Unix(PathBuf), -} - -/// Authentication information. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct User { - name: String, - password: Option, -} - -impl User { - /// The username. - pub fn name(&self) -> &str { - &self.name - } - - /// An optional password. - pub fn password(&self) -> Option<&str> { - self.password.as_ref().map(|p| &**p) - } -} - -/// Information necessary to open a new connection to a Postgres server. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct ConnectParams { - host: Host, - port: u16, - user: Option, - database: Option, - options: Vec<(String, String)>, - connect_timeout: Option, - keepalive: Option, -} - -impl ConnectParams { - /// Returns a new builder. - pub fn builder() -> Builder { - Builder::new() - } - - /// The target host. - pub fn host(&self) -> &Host { - &self.host - } - - /// The target port. - /// - /// Defaults to 5432. - pub fn port(&self) -> u16 { - self.port - } - - /// The user to log in as. - /// - /// A user is required to open a new connection but not to cancel a query. - pub fn user(&self) -> Option<&User> { - self.user.as_ref() - } - - /// The database to connect to. - pub fn database(&self) -> Option<&str> { - self.database.as_ref().map(|d| &**d) - } - - /// Runtime parameters to be passed to the Postgres backend. - pub fn options(&self) -> &[(String, String)] { - &self.options - } - - /// A timeout to apply to each socket-level connection attempt. - pub fn connect_timeout(&self) -> Option { - self.connect_timeout - } - - /// The interval at which TCP keepalive messages are sent on the socket. - /// - /// This is ignored for Unix sockets. - pub fn keepalive(&self) -> Option { - self.keepalive - } -} - -impl FromStr for ConnectParams { - type Err = error::Error; - - fn from_str(s: &str) -> Result { - s.into_connect_params().map_err(error::connect) - } -} - -/// A builder for `ConnectParams`. -pub struct Builder { - port: u16, - user: Option, - database: Option, - options: Vec<(String, String)>, - connect_timeout: Option, - keepalive: Option, -} - -impl Builder { - /// Creates a new builder. - pub fn new() -> Builder { - Builder { - port: 5432, - user: None, - database: None, - options: vec![], - connect_timeout: None, - keepalive: None, - } - } - - /// Sets the port. - pub fn port(&mut self, port: u16) -> &mut Builder { - self.port = port; - self - } - - /// Sets the user. - pub fn user(&mut self, name: &str, password: Option<&str>) -> &mut Builder { - self.user = Some(User { - name: name.to_string(), - password: password.map(ToString::to_string), - }); - self - } - - /// Sets the database. - pub fn database(&mut self, database: &str) -> &mut Builder { - self.database = Some(database.to_string()); - self - } - - /// Adds a runtime parameter. - pub fn option(&mut self, name: &str, value: &str) -> &mut Builder { - self.options.push((name.to_string(), value.to_string())); - self - } - - /// Sets the connection timeout. 
- pub fn connect_timeout(&mut self, connect_timeout: Option) -> &mut Builder { - self.connect_timeout = connect_timeout; - self - } - - /// Sets the keepalive interval. - pub fn keepalive(&mut self, keepalive: Option) -> &mut Builder { - self.keepalive = keepalive; - self - } - - /// Constructs a `ConnectParams` from the builder. - pub fn build(&mut self, host: Host) -> ConnectParams { - ConnectParams { - host: host, - port: self.port, - user: self.user.take(), - database: self.database.take(), - options: mem::replace(&mut self.options, vec![]), - connect_timeout: self.connect_timeout, - keepalive: self.keepalive, - } - } -} - -/// A trait implemented by types that can be converted into a `ConnectParams`. -pub trait IntoConnectParams { - /// Converts the value of `self` into a `ConnectParams`. - fn into_connect_params(self) -> Result>; -} - -impl IntoConnectParams for ConnectParams { - fn into_connect_params(self) -> Result> { - Ok(self) - } -} - -impl<'a> IntoConnectParams for &'a str { - fn into_connect_params(self) -> Result> { - match Url::parse(self) { - Ok(url) => url.into_connect_params(), - Err(err) => Err(err.into()), - } - } -} - -impl IntoConnectParams for String { - fn into_connect_params(self) -> Result> { - self.as_str().into_connect_params() - } -} - -impl IntoConnectParams for Url { - fn into_connect_params(self) -> Result> { - let Url { - host, - port, - user, - path: - url::Path { - path, - query: options, - .. - }, - .. - } = self; - - let mut builder = ConnectParams::builder(); - - if let Some(port) = port { - builder.port(port); - } - - if let Some(info) = user { - builder.user(&info.user, info.pass.as_ref().map(|p| &**p)); - } - - if !path.is_empty() { - // path contains the leading / - builder.database(&path[1..]); - } - - for (name, value) in options { - match &*name { - "connect_timeout" => { - let timeout = value.parse().map_err(|_| "invalid connect_timeout")?; - let timeout = Duration::from_secs(timeout); - builder.connect_timeout(Some(timeout)); - } - "keepalive" => { - let keepalive = value.parse().map_err(|_| "invalid keepalive")?; - let keepalive = Duration::from_secs(keepalive); - builder.keepalive(Some(keepalive)); - } - _ => { - builder.option(&name, &value); - } - } - } - - let maybe_path = url::decode_component(&host)?; - let host = if maybe_path.starts_with('/') { - Host::Unix(maybe_path.into()) - } else { - Host::Tcp(maybe_path) - }; - - Ok(builder.build(host)) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn parse_url() { - let params = "postgres://user@host:44/dbname?connect_timeout=10&application_name=foo"; - let params = params.into_connect_params().unwrap(); - assert_eq!( - params.user(), - Some(&User { - name: "user".to_string(), - password: None, - }) - ); - assert_eq!(params.host(), &Host::Tcp("host".to_string())); - assert_eq!(params.port(), 44); - assert_eq!(params.database(), Some("dbname")); - assert_eq!( - params.options(), - &[("application_name".to_string(), "foo".to_string())][..] - ); - assert_eq!(params.connect_timeout(), Some(Duration::from_secs(10))); - } -} diff --git a/postgres-shared/src/params/url.rs b/postgres-shared/src/params/url.rs deleted file mode 100644 index e965f2152..000000000 --- a/postgres-shared/src/params/url.rs +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -use hex::FromHex; -use std::str::FromStr; - -pub struct Url { - pub scheme: String, - pub user: Option, - pub host: String, - pub port: Option, - pub path: Path, -} - -pub struct Path { - pub path: String, - pub query: Query, - pub fragment: Option, -} - -pub struct UserInfo { - pub user: String, - pub pass: Option, -} - -pub type Query = Vec<(String, String)>; - -impl Url { - pub fn new( - scheme: String, - user: Option, - host: String, - port: Option, - path: String, - query: Query, - fragment: Option, - ) -> Url { - Url { - scheme: scheme, - user: user, - host: host, - port: port, - path: Path::new(path, query, fragment), - } - } - - pub fn parse(rawurl: &str) -> DecodeResult { - // scheme - let (scheme, rest) = get_scheme(rawurl)?; - - // authority - let (userinfo, host, port, rest) = get_authority(rest)?; - - // path - let has_authority = !host.is_empty(); - let (path, rest) = get_path(rest, has_authority)?; - - // query and fragment - let (query, fragment) = get_query_fragment(rest)?; - - let url = Url::new( - scheme.to_owned(), - userinfo, - host.to_owned(), - port, - path, - query, - fragment, - ); - Ok(url) - } -} - -impl Path { - pub fn new(path: String, query: Query, fragment: Option) -> Path { - Path { - path: path, - query: query, - fragment: fragment, - } - } - - pub fn parse(rawpath: &str) -> DecodeResult { - let (path, rest) = get_path(rawpath, false)?; - - // query and fragment - let (query, fragment) = get_query_fragment(&rest)?; - - Ok(Path { - path: path, - query: query, - fragment: fragment, - }) - } -} - -impl UserInfo { - #[inline] - pub fn new(user: String, pass: Option) -> UserInfo { - UserInfo { - user: user, - pass: pass, - } - } -} - -pub type DecodeResult = Result; - -pub fn decode_component(container: &str) -> DecodeResult { - decode_inner(container, false) -} - -fn decode_inner(c: &str, full_url: bool) -> DecodeResult { - let mut out = String::new(); - let mut iter = c.as_bytes().iter().cloned(); - - loop { - match iter.next() { - Some(b) => { - match b as char { - '%' => { - let bytes = match (iter.next(), iter.next()) { - (Some(one), Some(two)) => [one, two], - _ => { - return Err("Malformed input: found '%' without two \ - trailing bytes" - .to_owned()) - } - }; - - let bytes_from_hex = match Vec::::from_hex(&bytes) { - Ok(b) => b, - _ => { - return Err("Malformed input: found '%' followed by \ - invalid hex values. Character '%' must \ - escaped." - .to_owned()) - } - }; - - // Only decode some characters if full_url: - match bytes_from_hex[0] as char { - // gen-delims: - ':' | '/' | '?' | '#' | '[' | ']' | '@' | '!' 
| '$' | '&' | '"' - | '(' | ')' | '*' | '+' | ',' | ';' | '=' - if full_url => - { - out.push('%'); - out.push(bytes[0] as char); - out.push(bytes[1] as char); - } - - ch => out.push(ch), - } - } - ch => out.push(ch), - } - } - None => return Ok(out), - } - } -} - -fn split_char_first(s: &str, c: char) -> (&str, &str) { - let mut iter = s.splitn(2, c); - - match (iter.next(), iter.next()) { - (Some(a), Some(b)) => (a, b), - (Some(a), None) => (a, ""), - (None, _) => unreachable!(), - } -} - -fn query_from_str(rawquery: &str) -> DecodeResult { - let mut query: Query = vec![]; - if !rawquery.is_empty() { - for p in rawquery.split('&') { - let (k, v) = split_char_first(p, '='); - query.push((decode_component(k)?, decode_component(v)?)); - } - } - - Ok(query) -} - -pub fn get_scheme(rawurl: &str) -> DecodeResult<(&str, &str)> { - for (i, c) in rawurl.chars().enumerate() { - let result = match c { - 'A'...'Z' | 'a'...'z' => continue, - '0'...'9' | '+' | '-' | '.' => { - if i != 0 { - continue; - } - - Err("url: Scheme must begin with a letter.".to_owned()) - } - ':' => { - if i == 0 { - Err("url: Scheme cannot be empty.".to_owned()) - } else { - Ok((&rawurl[0..i], &rawurl[i + 1..rawurl.len()])) - } - } - _ => Err("url: Invalid character in scheme.".to_owned()), - }; - - return result; - } - - Err("url: Scheme must be terminated with a colon.".to_owned()) -} - -// returns userinfo, host, port, and unparsed part, or an error -fn get_authority(rawurl: &str) -> DecodeResult<(Option, &str, Option, &str)> { - enum State { - Start, // starting state - PassHostPort, // could be in user or port - Ip6Port, // either in ipv6 host or port - Ip6Host, // are in an ipv6 host - InHost, // are in a host - may be ipv6, but don't know yet - InPort, // are in port - } - - #[derive(Clone, PartialEq)] - enum Input { - Digit, // all digits - Hex, // digits and letters a-f - Unreserved, // all other legal characters - } - - if !rawurl.starts_with("//") { - // there is no authority. - return Ok((None, "", None, rawurl)); - } - - let len = rawurl.len(); - let mut st = State::Start; - let mut input = Input::Digit; // most restricted, start here. - - let mut userinfo = None; - let mut host = ""; - let mut port = None; - - let mut colon_count = 0usize; - let mut pos = 0; - let mut begin = 2; - let mut end = len; - - for (i, c) in rawurl.chars().enumerate().skip(2) { - // deal with input class first - match c { - '0'...'9' => (), - 'A'...'F' | 'a'...'f' => { - if input == Input::Digit { - input = Input::Hex; - } - } - 'G'...'Z' - | 'g'...'z' - | '-' - | '.' - | '_' - | '~' - | '%' - | '&' - | '\'' - | '(' - | ')' - | '+' - | '!' - | '*' - | ',' - | ';' - | '=' => input = Input::Unreserved, - ':' | '@' | '?' | '#' | '/' => { - // separators, don't change anything - } - _ => return Err("Illegal character in authority".to_owned()), - } - - // now process states - match c { - ':' => { - colon_count += 1; - match st { - State::Start => { - pos = i; - st = State::PassHostPort; - } - State::PassHostPort => { - // multiple colons means ipv6 address. 
- if input == Input::Unreserved { - return Err("Illegal characters in IPv6 address.".to_owned()); - } - st = State::Ip6Host; - } - State::InHost => { - pos = i; - if input == Input::Unreserved { - // must be port - host = &rawurl[begin..i]; - st = State::InPort; - } else { - // can't be sure whether this is an ipv6 address or a port - st = State::Ip6Port; - } - } - State::Ip6Port => { - if input == Input::Unreserved { - return Err("Illegal characters in authority.".to_owned()); - } - st = State::Ip6Host; - } - State::Ip6Host => { - if colon_count > 7 { - host = &rawurl[begin..i]; - pos = i; - st = State::InPort; - } - } - _ => return Err("Invalid ':' in authority.".to_owned()), - } - input = Input::Digit; // reset input class - } - - '@' => { - input = Input::Digit; // reset input class - colon_count = 0; // reset count - match st { - State::Start => { - let user = decode_component(&rawurl[begin..i])?; - userinfo = Some(UserInfo::new(user, None)); - st = State::InHost; - } - State::PassHostPort => { - let user = decode_component(&rawurl[begin..pos])?; - let pass = decode_component(&rawurl[pos + 1..i])?; - userinfo = Some(UserInfo::new(user, Some(pass))); - st = State::InHost; - } - _ => return Err("Invalid '@' in authority.".to_owned()), - } - begin = i + 1; - } - - '?' | '#' | '/' => { - end = i; - break; - } - _ => (), - } - } - - // finish up - match st { - State::PassHostPort | State::Ip6Port => { - if input != Input::Digit { - return Err("Non-digit characters in port.".to_owned()); - } - host = &rawurl[begin..pos]; - port = Some(&rawurl[pos + 1..end]); - } - State::Ip6Host | State::InHost | State::Start => host = &rawurl[begin..end], - State::InPort => { - if input != Input::Digit { - return Err("Non-digit characters in port.".to_owned()); - } - port = Some(&rawurl[pos + 1..end]); - } - } - - let rest = &rawurl[end..len]; - // If we have a port string, ensure it parses to u16. - let port = match port { - None => None, - opt => match opt.and_then(|p| FromStr::from_str(p).ok()) { - None => return Err(format!("Failed to parse port: {:?}", port)), - opt => opt, - }, - }; - - Ok((userinfo, host, port, rest)) -} - -// returns the path and unparsed part of url, or an error -fn get_path(rawurl: &str, is_authority: bool) -> DecodeResult<(String, &str)> { - let len = rawurl.len(); - let mut end = len; - for (i, c) in rawurl.chars().enumerate() { - match c { - 'A'...'Z' - | 'a'...'z' - | '0'...'9' - | '&' - | '\'' - | '(' - | ')' - | '.' - | '@' - | ':' - | '%' - | '/' - | '+' - | '!' - | '*' - | ',' - | ';' - | '=' - | '_' - | '-' - | '~' => continue, - '?' 
| '#' => { - end = i; - break; - } - _ => return Err("Invalid character in path.".to_owned()), - } - } - - if is_authority && end != 0 && !rawurl.starts_with('/') { - Err("Non-empty path must begin with '/' in presence of authority.".to_owned()) - } else { - Ok((decode_component(&rawurl[0..end])?, &rawurl[end..len])) - } -} - -// returns the parsed query and the fragment, if present -fn get_query_fragment(rawurl: &str) -> DecodeResult<(Query, Option)> { - let (before_fragment, raw_fragment) = split_char_first(rawurl, '#'); - - // Parse the fragment if available - let fragment = match raw_fragment { - "" => None, - raw => Some(decode_component(raw)?), - }; - - match before_fragment.chars().next() { - Some('?') => Ok((query_from_str(&before_fragment[1..])?, fragment)), - None => Ok((vec![], fragment)), - _ => Err(format!( - "Query didn't start with '?': '{}..'", - before_fragment - )), - } -} - -impl FromStr for Url { - type Err = String; - fn from_str(s: &str) -> Result { - Url::parse(s) - } -} - -impl FromStr for Path { - type Err = String; - fn from_str(s: &str) -> Result { - Path::parse(s) - } -} diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index ae1e43eac..538929c7a 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -23,12 +23,12 @@ features = [ circle-ci = { repository = "sfackler/rust-postgres" } [features] -"with-bit-vec-0.5" = ["postgres-shared/with-bit-vec-0.5"] -"with-chrono-0.4" = ["postgres-shared/with-chrono-0.4"] -"with-eui48-0.3" = ["postgres-shared/with-eui48-0.3"] -"with-geo-0.10" = ["postgres-shared/with-geo-0.10"] -"with-serde_json-1" = ["postgres-shared/with-serde_json-1"] -"with-uuid-0.6" = ["postgres-shared/with-uuid-0.6"] +"with-bit-vec-0.5" = ["bit-vec"] +"with-chrono-0.4" = ["chrono"] +"with-eui48-0.3" = ["eui48"] +"with-geo-0.10" = ["geo"] +with-serde_json-1 = ["serde", "serde_json"] +"with-uuid-0.6" = ["uuid"] [dependencies] antidote = "1.0" @@ -37,14 +37,21 @@ fallible-iterator = "0.1.3" futures = "0.1.7" futures-cpupool = "0.1" log = "0.4" -phf = "=0.7.22" +phf = "0.7.23" postgres-protocol = { version = "0.3.0", path = "../postgres-protocol" } -postgres-shared = { version = "0.4.0", path = "../postgres-shared" } state_machine_future = "0.1.7" tokio-codec = "0.1" tokio-io = "0.1" void = "1.0" +bit-vec = { version = "0.5", optional = true } +chrono = { version = "0.4", optional = true } +eui48 = { version = "0.3", optional = true } +geo = { version = "0.10", optional = true } +serde = { version = "1.0", optional = true } +serde_json = { version = "1.0", optional = true } +uuid = { version = "0.6", optional = true } + [dev-dependencies] tokio = "0.1.7" env_logger = "0.5" diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 022a79b25..b13a47c26 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -4,7 +4,6 @@ extern crate fallible_iterator; extern crate futures_cpupool; extern crate phf; extern crate postgres_protocol; -extern crate postgres_shared; extern crate tokio_codec; extern crate tokio_io; extern crate void; @@ -18,29 +17,26 @@ extern crate state_machine_future; use bytes::{Bytes, IntoBuf}; use futures::{Async, Future, Poll, Stream}; -use postgres_shared::rows::RowIndex; use std::error::Error as StdError; use std::fmt; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio_io::{AsyncRead, AsyncWrite}; -#[doc(inline)] -pub use postgres_shared::stmt::Column; -#[doc(inline)] -pub use postgres_shared::{params, types}; -#[doc(inline)] -pub use postgres_shared::{CancelData, 
Notification}; - pub use builder::*; pub use error::*; use proto::CancelFuture; +use rows::RowIndex; +pub use stmt::Column; pub use tls::*; use types::{FromSql, ToSql, Type}; mod builder; pub mod error; mod proto; +pub mod rows; +mod stmt; mod tls; +pub mod types; fn next_statement() -> String { static ID: AtomicUsize = AtomicUsize::new(0); @@ -386,3 +382,23 @@ impl Future for BatchExecute { self.0.poll() } } + +/// Contains information necessary to cancel queries for a session. +#[derive(Copy, Clone, Debug)] +pub struct CancelData { + /// The process ID of the session. + pub process_id: i32, + /// The secret key for the session. + pub secret_key: i32, +} + +/// An asynchronous notification. +#[derive(Clone, Debug)] +pub struct Notification { + /// The process ID of the notifying backend process. + pub process_id: i32, + /// The name of the channel that the notify has been raised on. + pub channel: String, + /// The "payload" string passed from the notifying process. + pub payload: String, +} diff --git a/tokio-postgres/src/proto/row.rs b/tokio-postgres/src/proto/row.rs index 38c270d45..86348f460 100644 --- a/tokio-postgres/src/proto/row.rs +++ b/tokio-postgres/src/proto/row.rs @@ -1,8 +1,8 @@ use postgres_protocol::message::backend::DataRowBody; -use postgres_shared::rows::{RowData, RowIndex}; use std::fmt; use proto::statement::Statement; +use rows::{RowData, RowIndex}; use types::{FromSql, WrongType}; use {Column, Error}; diff --git a/tokio-postgres/src/proto/statement.rs b/tokio-postgres/src/proto/statement.rs index 3460a76c0..12bee1a22 100644 --- a/tokio-postgres/src/proto/statement.rs +++ b/tokio-postgres/src/proto/statement.rs @@ -1,8 +1,8 @@ -use postgres_shared::stmt::Column; use std::sync::Arc; use proto::client::WeakClient; use types::Type; +use Column; pub struct StatementInner { client: WeakClient, diff --git a/postgres-shared/src/rows.rs b/tokio-postgres/src/rows.rs similarity index 100% rename from postgres-shared/src/rows.rs rename to tokio-postgres/src/rows.rs diff --git a/postgres-shared/src/stmt.rs b/tokio-postgres/src/stmt.rs similarity index 100% rename from postgres-shared/src/stmt.rs rename to tokio-postgres/src/stmt.rs diff --git a/postgres-shared/src/types/bit_vec.rs b/tokio-postgres/src/types/bit_vec.rs similarity index 100% rename from postgres-shared/src/types/bit_vec.rs rename to tokio-postgres/src/types/bit_vec.rs diff --git a/postgres-shared/src/types/chrono.rs b/tokio-postgres/src/types/chrono.rs similarity index 100% rename from postgres-shared/src/types/chrono.rs rename to tokio-postgres/src/types/chrono.rs diff --git a/postgres-shared/src/types/eui48.rs b/tokio-postgres/src/types/eui48.rs similarity index 100% rename from postgres-shared/src/types/eui48.rs rename to tokio-postgres/src/types/eui48.rs diff --git a/postgres-shared/src/types/geo.rs b/tokio-postgres/src/types/geo.rs similarity index 100% rename from postgres-shared/src/types/geo.rs rename to tokio-postgres/src/types/geo.rs diff --git a/postgres-shared/src/types/mod.rs b/tokio-postgres/src/types/mod.rs similarity index 100% rename from postgres-shared/src/types/mod.rs rename to tokio-postgres/src/types/mod.rs diff --git a/postgres-shared/src/types/serde_json.rs b/tokio-postgres/src/types/serde_json.rs similarity index 100% rename from postgres-shared/src/types/serde_json.rs rename to tokio-postgres/src/types/serde_json.rs diff --git a/postgres-shared/src/types/special.rs b/tokio-postgres/src/types/special.rs similarity index 100% rename from postgres-shared/src/types/special.rs rename to 
tokio-postgres/src/types/special.rs diff --git a/postgres-shared/src/types/type_gen.rs b/tokio-postgres/src/types/type_gen.rs similarity index 100% rename from postgres-shared/src/types/type_gen.rs rename to tokio-postgres/src/types/type_gen.rs diff --git a/postgres-shared/src/types/uuid.rs b/tokio-postgres/src/types/uuid.rs similarity index 100% rename from postgres-shared/src/types/uuid.rs rename to tokio-postgres/src/types/uuid.rs From b4ce9c38e5af762ecdb708720747e3a46f79233e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 17:40:37 -0800 Subject: [PATCH 031/819] Upgrade tokio-postgres to 2018 edition --- tokio-postgres/Cargo.toml | 1 + tokio-postgres/src/builder.rs | 4 +- tokio-postgres/src/error/mod.rs | 26 +++--- tokio-postgres/src/lib.rs | 54 +++++-------- tokio-postgres/src/proto/bind.rs | 11 +-- tokio-postgres/src/proto/cancel.rs | 10 +-- tokio-postgres/src/proto/client.rs | 40 +++++----- tokio-postgres/src/proto/connect.rs | 10 +-- tokio-postgres/src/proto/connection.rs | 11 +-- tokio-postgres/src/proto/copy_in.rs | 16 ++-- tokio-postgres/src/proto/copy_out.rs | 6 +- tokio-postgres/src/proto/execute.rs | 8 +- tokio-postgres/src/proto/mod.rs | 36 ++++----- tokio-postgres/src/proto/portal.rs | 4 +- tokio-postgres/src/proto/prepare.rs | 14 ++-- tokio-postgres/src/proto/query.rs | 10 +-- tokio-postgres/src/proto/row.rs | 8 +- tokio-postgres/src/proto/simple_query.rs | 6 +- tokio-postgres/src/proto/statement.rs | 6 +- tokio-postgres/src/proto/tls.rs | 6 +- tokio-postgres/src/proto/transaction.rs | 10 +-- tokio-postgres/src/proto/typeinfo.rs | 29 +++---- .../src/proto/typeinfo_composite.rs | 23 +++--- tokio-postgres/src/proto/typeinfo_enum.rs | 25 +++--- tokio-postgres/src/rows.rs | 6 +- tokio-postgres/src/stmt.rs | 2 +- tokio-postgres/src/tls.rs | 18 ++--- tokio-postgres/src/types/bit_vec.rs | 4 +- tokio-postgres/src/types/chrono.rs | 4 +- tokio-postgres/src/types/eui48.rs | 4 +- tokio-postgres/src/types/geo.rs | 4 +- tokio-postgres/src/types/mod.rs | 70 ++++++++-------- tokio-postgres/src/types/serde_json.rs | 7 +- tokio-postgres/src/types/special.rs | 10 +-- tokio-postgres/src/types/type_gen.rs | 2 +- tokio-postgres/src/types/uuid.rs | 4 +- tokio-postgres/tests/test.rs | 80 +++++++++++-------- 37 files changed, 289 insertions(+), 300 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 538929c7a..ea8f66590 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -2,6 +2,7 @@ name = "tokio-postgres" version = "0.3.0" authors = ["Steven Fackler "] +edition = "2018" license = "MIT" description = "A native PostgreSQL driver using Tokio" repository = "https://github.com/sfackler/rust-postgres" diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index d7436c2b6..8317522a3 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use tokio_io::{AsyncRead, AsyncWrite}; -use proto::ConnectFuture; -use {Connect, TlsMode}; +use crate::proto::ConnectFuture; +use crate::{Connect, TlsMode}; #[derive(Clone)] pub struct Builder { diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index bda6d4023..20bb95e52 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -32,7 +32,7 @@ pub enum Severity { } impl fmt::Display for Severity { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let s = match *self { 
Severity::Panic => "PANIC", Severity::Fatal => "FATAL", @@ -85,7 +85,7 @@ pub struct DbError { } impl DbError { - pub(crate) fn new(fields: &mut ErrorFields) -> io::Result { + pub(crate) fn new(fields: &mut ErrorFields<'_>) -> io::Result { let mut severity = None; let mut parsed_severity = None; let mut code = None; @@ -307,7 +307,7 @@ impl DbError { } impl fmt::Display for DbError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{}: {}", self.severity, self.message) } } @@ -348,14 +348,14 @@ enum Kind { struct ErrorInner { kind: Kind, - cause: Option>, + cause: Option>, } /// An error communicating with the Postgres server. pub struct Error(Box); impl fmt::Debug for Error { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Error") .field("kind", &self.0.kind) .field("cause", &self.0.cause) @@ -364,7 +364,7 @@ impl fmt::Debug for Error { } impl fmt::Display for Error { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let s = match self.0.kind { Kind::Io => "error communicating with the server", Kind::UnexpectedMessage => "unexpected message from server", @@ -390,14 +390,14 @@ impl fmt::Display for Error { } impl error::Error for Error { - fn source(&self) -> Option<&(error::Error + 'static)> { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { self.0.cause.as_ref().map(|e| &**e as _) } } impl Error { /// Consumes the error, returning its cause. - pub fn into_cause(self) -> Option> { + pub fn into_cause(self) -> Option> { self.0.cause } @@ -411,7 +411,7 @@ impl Error { .map(|e| e.code()) } - fn new(kind: Kind, cause: Option>) -> Error { + fn new(kind: Kind, cause: Option>) -> Error { Error(Box::new(ErrorInner { kind, cause })) } @@ -438,17 +438,17 @@ impl Error { Error::new(Kind::Encode, Some(Box::new(e))) } - pub(crate) fn to_sql(e: Box) -> Error { + pub(crate) fn to_sql(e: Box) -> Error { Error::new(Kind::ToSql, Some(e)) } - pub(crate) fn from_sql(e: Box) -> Error { + pub(crate) fn from_sql(e: Box) -> Error { Error::new(Kind::FromSql, Some(e)) } pub(crate) fn copy_in_stream(e: E) -> Error where - E: Into>, + E: Into>, { Error::new(Kind::CopyInStream, Some(e.into())) } @@ -465,7 +465,7 @@ impl Error { Error::new(Kind::UnsupportedAuthentication, None) } - pub(crate) fn tls(e: Box) -> Error { + pub(crate) fn tls(e: Box) -> Error { Error::new(Kind::Tls, Some(e)) } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index b13a47c26..83bee86a9 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -1,34 +1,19 @@ -extern crate antidote; -extern crate bytes; -extern crate fallible_iterator; -extern crate futures_cpupool; -extern crate phf; -extern crate postgres_protocol; -extern crate tokio_codec; -extern crate tokio_io; -extern crate void; - -#[macro_use] -extern crate futures; -#[macro_use] -extern crate log; -#[macro_use] -extern crate state_machine_future; +#![warn(rust_2018_idioms)] use bytes::{Bytes, IntoBuf}; -use futures::{Async, Future, Poll, Stream}; +use futures::{try_ready, Async, Future, Poll, Stream}; use std::error::Error as StdError; use std::fmt; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio_io::{AsyncRead, AsyncWrite}; -pub use builder::*; -pub use error::*; -use proto::CancelFuture; -use rows::RowIndex; -pub use stmt::Column; -pub use tls::*; -use types::{FromSql, ToSql, 
Type}; +pub use crate::builder::*; +pub use crate::error::*; +use crate::proto::CancelFuture; +use crate::rows::RowIndex; +pub use crate::stmt::Column; +pub use crate::tls::*; +use crate::types::{FromSql, ToSql, Type}; mod builder; pub mod error; @@ -67,15 +52,15 @@ impl Client { Prepare(self.0.prepare(next_statement(), query, param_types)) } - pub fn execute(&mut self, statement: &Statement, params: &[&ToSql]) -> Execute { + pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Execute { Execute(self.0.execute(&statement.0, params)) } - pub fn query(&mut self, statement: &Statement, params: &[&ToSql]) -> Query { + pub fn query(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Query { Query(self.0.query(&statement.0, params)) } - pub fn bind(&mut self, statement: &Statement, params: &[&ToSql]) -> Bind { + pub fn bind(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Bind { Bind(self.0.bind(&statement.0, next_portal(), params)) } @@ -83,18 +68,23 @@ impl Client { QueryPortal(self.0.query_portal(&portal.0, max_rows)) } - pub fn copy_in(&mut self, statement: &Statement, params: &[&ToSql], stream: S) -> CopyIn + pub fn copy_in( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + stream: S, + ) -> CopyIn where S: Stream, S::Item: IntoBuf, ::Buf: Send, // FIXME error type? - S::Error: Into>, + S::Error: Into>, { CopyIn(self.0.copy_in(&statement.0, params, stream)) } - pub fn copy_out(&mut self, statement: &Statement, params: &[&ToSql]) -> CopyOut { + pub fn copy_out(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> CopyOut { CopyOut(self.0.copy_out(&statement.0, params)) } @@ -282,14 +272,14 @@ where S: Stream, S::Item: IntoBuf, ::Buf: Send, - S::Error: Into>; + S::Error: Into>; impl Future for CopyIn where S: Stream, S::Item: IntoBuf, ::Buf: Send, - S::Error: Into>, + S::Error: Into>, { type Item = u64; type Error = Error; diff --git a/tokio-postgres/src/proto/bind.rs b/tokio-postgres/src/proto/bind.rs index 00f78a0f4..0a8a3b5f0 100644 --- a/tokio-postgres/src/proto/bind.rs +++ b/tokio-postgres/src/proto/bind.rs @@ -1,11 +1,12 @@ use futures::sync::mpsc; use futures::{Poll, Stream}; use postgres_protocol::message::backend::Message; -use proto::client::{Client, PendingRequest}; -use proto::portal::Portal; -use proto::statement::Statement; -use state_machine_future::RentToOwn; -use Error; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; + +use crate::proto::client::{Client, PendingRequest}; +use crate::proto::portal::Portal; +use crate::proto::statement::Statement; +use crate::Error; #[derive(StateMachineFuture)] pub enum Bind { diff --git a/tokio-postgres/src/proto/cancel.rs b/tokio-postgres/src/proto/cancel.rs index 0a5e2492e..dc37de654 100644 --- a/tokio-postgres/src/proto/cancel.rs +++ b/tokio-postgres/src/proto/cancel.rs @@ -1,12 +1,12 @@ -use futures::{Future, Poll}; +use futures::{try_ready, Future, Poll}; use postgres_protocol::message::frontend; -use state_machine_future::RentToOwn; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use tokio_io::io::{self, Flush, WriteAll}; use tokio_io::{AsyncRead, AsyncWrite}; -use error::Error; -use proto::TlsFuture; -use {CancelData, TlsMode}; +use crate::error::Error; +use crate::proto::TlsFuture; +use crate::{CancelData, TlsMode}; #[derive(StateMachineFuture)] pub enum Cancel diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index 60b2f7098..bae395ad2 100644 --- a/tokio-postgres/src/proto/client.rs +++ 
b/tokio-postgres/src/proto/client.rs @@ -9,18 +9,18 @@ use std::collections::HashMap; use std::error::Error as StdError; use std::sync::{Arc, Weak}; -use proto::bind::BindFuture; -use proto::connection::{Request, RequestMessages}; -use proto::copy_in::{CopyInFuture, CopyInReceiver, CopyMessage}; -use proto::copy_out::CopyOutStream; -use proto::execute::ExecuteFuture; -use proto::portal::Portal; -use proto::prepare::PrepareFuture; -use proto::query::QueryStream; -use proto::simple_query::SimpleQueryFuture; -use proto::statement::Statement; -use types::{IsNull, Oid, ToSql, Type}; -use Error; +use crate::proto::bind::BindFuture; +use crate::proto::connection::{Request, RequestMessages}; +use crate::proto::copy_in::{CopyInFuture, CopyInReceiver, CopyMessage}; +use crate::proto::copy_out::CopyOutStream; +use crate::proto::execute::ExecuteFuture; +use crate::proto::portal::Portal; +use crate::proto::prepare::PrepareFuture; +use crate::proto::query::QueryStream; +use crate::proto::simple_query::SimpleQueryFuture; +use crate::proto::statement::Statement; +use crate::types::{IsNull, Oid, ToSql, Type}; +use crate::Error; pub struct PendingRequest(Result); @@ -127,7 +127,7 @@ impl Client { PrepareFuture::new(self.clone(), pending, name) } - pub fn execute(&self, statement: &Statement, params: &[&ToSql]) -> ExecuteFuture { + pub fn execute(&self, statement: &Statement, params: &[&dyn ToSql]) -> ExecuteFuture { let pending = PendingRequest( self.excecute_message(statement, params) .map(RequestMessages::Single), @@ -135,7 +135,7 @@ impl Client { ExecuteFuture::new(self.clone(), pending, statement.clone()) } - pub fn query(&self, statement: &Statement, params: &[&ToSql]) -> QueryStream { + pub fn query(&self, statement: &Statement, params: &[&dyn ToSql]) -> QueryStream { let pending = PendingRequest( self.excecute_message(statement, params) .map(RequestMessages::Single), @@ -143,7 +143,7 @@ impl Client { QueryStream::new(self.clone(), pending, statement.clone()) } - pub fn bind(&self, statement: &Statement, name: String, params: &[&ToSql]) -> BindFuture { + pub fn bind(&self, statement: &Statement, name: String, params: &[&dyn ToSql]) -> BindFuture { let mut buf = self.bind_message(statement, &name, params); if let Ok(ref mut buf) = buf { frontend::sync(buf); @@ -161,12 +161,12 @@ impl Client { QueryStream::new(self.clone(), pending, portal.clone()) } - pub fn copy_in(&self, statement: &Statement, params: &[&ToSql], stream: S) -> CopyInFuture + pub fn copy_in(&self, statement: &Statement, params: &[&dyn ToSql], stream: S) -> CopyInFuture where S: Stream, S::Item: IntoBuf, ::Buf: Send, - S::Error: Into>, + S::Error: Into>, { let (mut sender, receiver) = mpsc::channel(0); let pending = PendingRequest(self.excecute_message(statement, params).map(|buf| { @@ -182,7 +182,7 @@ impl Client { CopyInFuture::new(self.clone(), pending, statement.clone(), stream, sender) } - pub fn copy_out(&self, statement: &Statement, params: &[&ToSql]) -> CopyOutStream { + pub fn copy_out(&self, statement: &Statement, params: &[&dyn ToSql]) -> CopyOutStream { let pending = PendingRequest( self.excecute_message(statement, params) .map(RequestMessages::Single), @@ -213,7 +213,7 @@ impl Client { &self, statement: &Statement, name: &str, - params: &[&ToSql], + params: &[&dyn ToSql], ) -> Result, Error> { let mut buf = vec![]; let r = frontend::bind( @@ -236,7 +236,7 @@ impl Client { } } - fn excecute_message(&self, statement: &Statement, params: &[&ToSql]) -> Result, Error> { + fn excecute_message(&self, statement: &Statement, params: 
&[&dyn ToSql]) -> Result, Error> { let mut buf = self.bind_message(statement, "", params)?; frontend::execute("", 0, &mut buf).map_err(Error::parse)?; frontend::sync(&mut buf); diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index e843b593d..8b5fe6075 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -1,19 +1,19 @@ use fallible_iterator::FallibleIterator; use futures::sink; use futures::sync::mpsc; -use futures::{Future, Poll, Sink, Stream}; +use futures::{try_ready, Future, Poll, Sink, Stream}; use postgres_protocol::authentication; use postgres_protocol::authentication::sasl::{self, ScramSha256}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; -use state_machine_future::RentToOwn; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::collections::HashMap; use std::io; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; -use proto::{Client, Connection, PostgresCodec, TlsFuture}; -use {CancelData, ChannelBinding, Error, TlsMode}; +use crate::proto::{Client, Connection, PostgresCodec, TlsFuture}; +use crate::{CancelData, ChannelBinding, Error, TlsMode}; #[derive(StateMachineFuture)] pub enum Connect @@ -180,7 +180,7 @@ where return Err(Error::unsupported_authentication()); }; - let mut scram = ScramSha256::new(pass.as_bytes(), channel_binding); + let scram = ScramSha256::new(pass.as_bytes(), channel_binding); let mut buf = vec![]; frontend::sasl_initial_response(mechanism, scram.message(), &mut buf) diff --git a/tokio-postgres/src/proto/connection.rs b/tokio-postgres/src/proto/connection.rs index 2f56d191c..4bae09ff5 100644 --- a/tokio-postgres/src/proto/connection.rs +++ b/tokio-postgres/src/proto/connection.rs @@ -1,5 +1,6 @@ use futures::sync::mpsc; -use futures::{Async, AsyncSink, Future, Poll, Sink, Stream}; +use futures::{try_ready, Async, AsyncSink, Future, Poll, Sink, Stream}; +use log::trace; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::collections::{HashMap, VecDeque}; @@ -7,10 +8,10 @@ use std::io; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; -use proto::codec::PostgresCodec; -use proto::copy_in::CopyInReceiver; -use {AsyncMessage, CancelData, Notification}; -use {DbError, Error}; +use crate::proto::codec::PostgresCodec; +use crate::proto::copy_in::CopyInReceiver; +use crate::{AsyncMessage, CancelData, Notification}; +use crate::{DbError, Error}; pub enum RequestMessages { Single(Vec), diff --git a/tokio-postgres/src/proto/copy_in.rs b/tokio-postgres/src/proto/copy_in.rs index 85fbae7eb..2f75596a4 100644 --- a/tokio-postgres/src/proto/copy_in.rs +++ b/tokio-postgres/src/proto/copy_in.rs @@ -1,15 +1,15 @@ use bytes::{Buf, IntoBuf}; use futures::sink; use futures::sync::mpsc; -use futures::{Async, AsyncSink, Future, Poll, Sink, Stream}; +use futures::{try_ready, Async, AsyncSink, Future, Poll, Sink, Stream}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; -use state_machine_future::RentToOwn; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::error::Error as StdError; -use proto::client::{Client, PendingRequest}; -use proto::statement::Statement; -use Error; +use crate::proto::client::{Client, PendingRequest}; +use crate::proto::statement::Statement; +use crate::Error; pub enum CopyMessage { Data(Vec), @@ -66,7 +66,7 @@ where S: Stream, S::Item: IntoBuf, ::Buf: Send, - 
S::Error: Into>, + S::Error: Into>, { #[state_machine_future(start, transitions(ReadCopyInResponse))] Start { @@ -107,7 +107,7 @@ where S: Stream, S::Item: IntoBuf, ::Buf: Send, - S::Error: Into>, + S::Error: Into>, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let state = state.take(); @@ -220,7 +220,7 @@ where S: Stream, S::Item: IntoBuf, ::Buf: Send, - S::Error: Into>, + S::Error: Into>, { pub fn new( client: Client, diff --git a/tokio-postgres/src/proto/copy_out.rs b/tokio-postgres/src/proto/copy_out.rs index 2fdf1dbf1..c0418222a 100644 --- a/tokio-postgres/src/proto/copy_out.rs +++ b/tokio-postgres/src/proto/copy_out.rs @@ -4,9 +4,9 @@ use futures::{Async, Poll, Stream}; use postgres_protocol::message::backend::Message; use std::mem; -use proto::client::{Client, PendingRequest}; -use proto::statement::Statement; -use Error; +use crate::proto::client::{Client, PendingRequest}; +use crate::proto::statement::Statement; +use crate::Error; enum State { Start { diff --git a/tokio-postgres/src/proto/execute.rs b/tokio-postgres/src/proto/execute.rs index 6ad3234ae..25b1e90a0 100644 --- a/tokio-postgres/src/proto/execute.rs +++ b/tokio-postgres/src/proto/execute.rs @@ -1,11 +1,11 @@ use futures::sync::mpsc; use futures::{Poll, Stream}; use postgres_protocol::message::backend::Message; -use state_machine_future::RentToOwn; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use proto::client::{Client, PendingRequest}; -use proto::statement::Statement; -use Error; +use crate::proto::client::{Client, PendingRequest}; +use crate::proto::statement::Statement; +use crate::Error; #[derive(StateMachineFuture)] pub enum Execute { diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 7cf5512cc..802bd6cf8 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -13,7 +13,7 @@ macro_rules! 
try_ready_closed { match $e { Ok(::futures::Async::Ready(v)) => v, Ok(::futures::Async::NotReady) => return Ok(::futures::Async::NotReady), - Err(_) => return Err(::Error::closed()), + Err(_) => return Err(crate::Error::closed()), } }; } @@ -39,20 +39,20 @@ mod typeinfo; mod typeinfo_composite; mod typeinfo_enum; -pub use proto::bind::BindFuture; -pub use proto::cancel::CancelFuture; -pub use proto::client::Client; -pub use proto::codec::PostgresCodec; -pub use proto::connect::ConnectFuture; -pub use proto::connection::Connection; -pub use proto::copy_in::CopyInFuture; -pub use proto::copy_out::CopyOutStream; -pub use proto::execute::ExecuteFuture; -pub use proto::portal::Portal; -pub use proto::prepare::PrepareFuture; -pub use proto::query::QueryStream; -pub use proto::row::Row; -pub use proto::simple_query::SimpleQueryFuture; -pub use proto::statement::Statement; -pub use proto::tls::TlsFuture; -pub use proto::transaction::TransactionFuture; +pub use crate::proto::bind::BindFuture; +pub use crate::proto::cancel::CancelFuture; +pub use crate::proto::client::Client; +pub use crate::proto::codec::PostgresCodec; +pub use crate::proto::connect::ConnectFuture; +pub use crate::proto::connection::Connection; +pub use crate::proto::copy_in::CopyInFuture; +pub use crate::proto::copy_out::CopyOutStream; +pub use crate::proto::execute::ExecuteFuture; +pub use crate::proto::portal::Portal; +pub use crate::proto::prepare::PrepareFuture; +pub use crate::proto::query::QueryStream; +pub use crate::proto::row::Row; +pub use crate::proto::simple_query::SimpleQueryFuture; +pub use crate::proto::statement::Statement; +pub use crate::proto::tls::TlsFuture; +pub use crate::proto::transaction::TransactionFuture; diff --git a/tokio-postgres/src/proto/portal.rs b/tokio-postgres/src/proto/portal.rs index ef982fc5e..26b93e433 100644 --- a/tokio-postgres/src/proto/portal.rs +++ b/tokio-postgres/src/proto/portal.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use proto::client::WeakClient; -use proto::statement::Statement; +use crate::proto::client::WeakClient; +use crate::proto::statement::Statement; struct Inner { client: WeakClient, diff --git a/tokio-postgres/src/proto/prepare.rs b/tokio-postgres/src/proto/prepare.rs index b4bfaaf5a..368af5740 100644 --- a/tokio-postgres/src/proto/prepare.rs +++ b/tokio-postgres/src/proto/prepare.rs @@ -1,16 +1,16 @@ use fallible_iterator::FallibleIterator; use futures::sync::mpsc; -use futures::{Future, Poll, Stream}; +use futures::{try_ready, Future, Poll, Stream}; use postgres_protocol::message::backend::Message; -use state_machine_future::RentToOwn; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::mem; use std::vec; -use proto::client::{Client, PendingRequest}; -use proto::statement::Statement; -use proto::typeinfo::TypeinfoFuture; -use types::{Oid, Type}; -use {Column, Error}; +use crate::proto::client::{Client, PendingRequest}; +use crate::proto::statement::Statement; +use crate::proto::typeinfo::TypeinfoFuture; +use crate::types::{Oid, Type}; +use crate::{Column, Error}; #[derive(StateMachineFuture)] pub enum Prepare { diff --git a/tokio-postgres/src/proto/query.rs b/tokio-postgres/src/proto/query.rs index 3cb5d1372..87474eda1 100644 --- a/tokio-postgres/src/proto/query.rs +++ b/tokio-postgres/src/proto/query.rs @@ -3,11 +3,11 @@ use futures::{Async, Poll, Stream}; use postgres_protocol::message::backend::Message; use std::mem; -use proto::client::{Client, PendingRequest}; -use proto::portal::Portal; -use proto::row::Row; -use 
proto::statement::Statement; -use Error; +use crate::proto::client::{Client, PendingRequest}; +use crate::proto::portal::Portal; +use crate::proto::row::Row; +use crate::proto::statement::Statement; +use crate::Error; pub trait StatementHolder { fn statement(&self) -> &Statement; diff --git a/tokio-postgres/src/proto/row.rs b/tokio-postgres/src/proto/row.rs index 86348f460..5fad43753 100644 --- a/tokio-postgres/src/proto/row.rs +++ b/tokio-postgres/src/proto/row.rs @@ -1,10 +1,10 @@ use postgres_protocol::message::backend::DataRowBody; use std::fmt; -use proto::statement::Statement; -use rows::{RowData, RowIndex}; -use types::{FromSql, WrongType}; -use {Column, Error}; +use crate::proto::statement::Statement; +use crate::rows::{RowData, RowIndex}; +use crate::types::{FromSql, WrongType}; +use crate::{Column, Error}; pub struct Row { statement: Statement, diff --git a/tokio-postgres/src/proto/simple_query.rs b/tokio-postgres/src/proto/simple_query.rs index e39d1b4e1..d1c279305 100644 --- a/tokio-postgres/src/proto/simple_query.rs +++ b/tokio-postgres/src/proto/simple_query.rs @@ -1,10 +1,10 @@ use futures::sync::mpsc; use futures::{Poll, Stream}; use postgres_protocol::message::backend::Message; -use state_machine_future::RentToOwn; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use proto::client::{Client, PendingRequest}; -use Error; +use crate::proto::client::{Client, PendingRequest}; +use crate::Error; #[derive(StateMachineFuture)] pub enum SimpleQuery { diff --git a/tokio-postgres/src/proto/statement.rs b/tokio-postgres/src/proto/statement.rs index 12bee1a22..94703a48b 100644 --- a/tokio-postgres/src/proto/statement.rs +++ b/tokio-postgres/src/proto/statement.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use proto::client::WeakClient; -use types::Type; -use Column; +use crate::proto::client::WeakClient; +use crate::types::Type; +use crate::Column; pub struct StatementInner { client: WeakClient, diff --git a/tokio-postgres/src/proto/tls.rs b/tokio-postgres/src/proto/tls.rs index a24394056..64a217251 100644 --- a/tokio-postgres/src/proto/tls.rs +++ b/tokio-postgres/src/proto/tls.rs @@ -1,10 +1,10 @@ -use futures::{Future, Poll}; +use futures::{try_ready, Future, Poll}; use postgres_protocol::message::frontend; -use state_machine_future::RentToOwn; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use tokio_io::io::{self, ReadExact, WriteAll}; use tokio_io::{AsyncRead, AsyncWrite}; -use {ChannelBinding, Error, TlsMode}; +use crate::{ChannelBinding, Error, TlsMode}; #[derive(StateMachineFuture)] pub enum Tls diff --git a/tokio-postgres/src/proto/transaction.rs b/tokio-postgres/src/proto/transaction.rs index 2a7b08265..fb828f780 100644 --- a/tokio-postgres/src/proto/transaction.rs +++ b/tokio-postgres/src/proto/transaction.rs @@ -1,9 +1,9 @@ -use futures::{Async, Future, Poll}; -use proto::client::Client; -use proto::simple_query::SimpleQueryFuture; -use state_machine_future::RentToOwn; +use crate::proto::client::Client; +use crate::proto::simple_query::SimpleQueryFuture; +use futures::{try_ready, Async, Future, Poll}; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use Error; +use crate::Error; #[derive(StateMachineFuture)] pub enum Transaction diff --git a/tokio-postgres/src/proto/typeinfo.rs b/tokio-postgres/src/proto/typeinfo.rs index 81aedcd9f..78a62611a 100644 --- a/tokio-postgres/src/proto/typeinfo.rs +++ b/tokio-postgres/src/proto/typeinfo.rs @@ -1,16 +1,16 @@ use futures::stream::{self, Stream}; -use futures::{Async, 
Future, Poll}; -use state_machine_future::RentToOwn; +use futures::{try_ready, Async, Future, Poll}; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use error::{Error, SqlState}; -use next_statement; -use proto::client::Client; -use proto::prepare::PrepareFuture; -use proto::query::QueryStream; -use proto::statement::Statement; -use proto::typeinfo_composite::TypeinfoCompositeFuture; -use proto::typeinfo_enum::TypeinfoEnumFuture; -use types::{Kind, Oid, Type}; +use crate::error::{Error, SqlState}; +use crate::next_statement; +use crate::proto::client::Client; +use crate::proto::prepare::PrepareFuture; +use crate::proto::query::QueryStream; +use crate::proto::statement::Statement; +use crate::proto::typeinfo_composite::TypeinfoCompositeFuture; +use crate::proto::typeinfo_enum::TypeinfoEnumFuture; +use crate::types::{Kind, Oid, Type}; const TYPEINFO_QUERY: &'static str = " SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid @@ -30,10 +30,7 @@ WHERE t.oid = $1 #[derive(StateMachineFuture)] pub enum Typeinfo { - #[state_machine_future( - start, - transitions(PreparingTypeinfo, QueryingTypeinfo, Finished) - )] + #[state_machine_future(start, transitions(PreparingTypeinfo, QueryingTypeinfo, Finished))] Start { oid: Oid, client: Client }, #[state_machine_future(transitions(PreparingTypeinfoFallback, QueryingTypeinfo))] PreparingTypeinfo { @@ -136,7 +133,7 @@ impl PollTypeinfo for Typeinfo { Ok(Async::Ready(statement)) => statement, Ok(Async::NotReady) => return Ok(Async::NotReady), Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => { - let mut state = state.take(); + let state = state.take(); transition!(PreparingTypeinfoFallback { future: Box::new(state.client.prepare( diff --git a/tokio-postgres/src/proto/typeinfo_composite.rs b/tokio-postgres/src/proto/typeinfo_composite.rs index ab2f16fbd..9df260a48 100644 --- a/tokio-postgres/src/proto/typeinfo_composite.rs +++ b/tokio-postgres/src/proto/typeinfo_composite.rs @@ -1,17 +1,17 @@ use futures::stream::{self, Stream}; -use futures::{Future, Poll}; -use state_machine_future::RentToOwn; +use futures::{try_ready, Future, Poll}; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::mem; use std::vec; -use error::Error; -use next_statement; -use proto::client::Client; -use proto::prepare::PrepareFuture; -use proto::query::QueryStream; -use proto::statement::Statement; -use proto::typeinfo::TypeinfoFuture; -use types::{Field, Oid}; +use crate::error::Error; +use crate::next_statement; +use crate::proto::client::Client; +use crate::proto::prepare::PrepareFuture; +use crate::proto::query::QueryStream; +use crate::proto::statement::Statement; +use crate::proto::typeinfo::TypeinfoFuture; +use crate::types::{Field, Oid}; const TYPEINFO_COMPOSITE_QUERY: &'static str = " SELECT attname, atttypid @@ -99,7 +99,8 @@ impl PollTypeinfoComposite for TypeinfoComposite { let name = row.try_get(0)?.ok_or_else(Error::unexpected_message)?; let oid = row.try_get(1)?.ok_or_else(Error::unexpected_message)?; Ok((name, oid)) - }).collect::, Error>>()?; + }) + .collect::, Error>>()?; let mut remaining_fields = fields.into_iter(); match remaining_fields.next() { diff --git a/tokio-postgres/src/proto/typeinfo_enum.rs b/tokio-postgres/src/proto/typeinfo_enum.rs index 82283dbe4..2f676f800 100644 --- a/tokio-postgres/src/proto/typeinfo_enum.rs +++ b/tokio-postgres/src/proto/typeinfo_enum.rs @@ -1,14 +1,14 @@ use futures::stream::{self, Stream}; -use futures::{Async, Future, Poll}; 
-use state_machine_future::RentToOwn; +use futures::{try_ready, Async, Future, Poll}; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use error::{Error, SqlState}; -use next_statement; -use proto::client::Client; -use proto::prepare::PrepareFuture; -use proto::query::QueryStream; -use proto::statement::Statement; -use types::Oid; +use crate::error::{Error, SqlState}; +use crate::next_statement; +use crate::proto::client::Client; +use crate::proto::prepare::PrepareFuture; +use crate::proto::query::QueryStream; +use crate::proto::statement::Statement; +use crate::types::Oid; const TYPEINFO_ENUM_QUERY: &'static str = " SELECT enumlabel @@ -27,10 +27,7 @@ ORDER BY oid #[derive(StateMachineFuture)] pub enum TypeinfoEnum { - #[state_machine_future( - start, - transitions(PreparingTypeinfoEnum, QueryingEnumVariants) - )] + #[state_machine_future(start, transitions(PreparingTypeinfoEnum, QueryingEnumVariants))] Start { oid: Oid, client: Client }, #[state_machine_future(transitions(PreparingTypeinfoEnumFallback, QueryingEnumVariants))] PreparingTypeinfoEnum { @@ -83,7 +80,7 @@ impl PollTypeinfoEnum for TypeinfoEnum { Ok(Async::Ready(statement)) => statement, Ok(Async::NotReady) => return Ok(Async::NotReady), Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => { - let mut state = state.take(); + let state = state.take(); transition!(PreparingTypeinfoEnumFallback { future: Box::new(state.client.prepare( diff --git a/tokio-postgres/src/rows.rs b/tokio-postgres/src/rows.rs index 2b0f8860a..54b934b5c 100644 --- a/tokio-postgres/src/rows.rs +++ b/tokio-postgres/src/rows.rs @@ -3,11 +3,11 @@ use postgres_protocol::message::backend::DataRowBody; use std::io; use std::ops::Range; -use rows::sealed::Sealed; -use stmt::Column; +use crate::rows::sealed::Sealed; +use crate::stmt::Column; mod sealed { - use stmt::Column; + use crate::stmt::Column; pub trait Sealed { fn __idx(&self, stmt: &[Column]) -> Option; diff --git a/tokio-postgres/src/stmt.rs b/tokio-postgres/src/stmt.rs index 85a993daf..037706d2d 100644 --- a/tokio-postgres/src/stmt.rs +++ b/tokio-postgres/src/stmt.rs @@ -1,4 +1,4 @@ -use types::Type; +use crate::types::Type; /// Information about a column of a Postgres query. 
#[derive(Debug)] diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 30ea8e16b..50dc8e26c 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -1,6 +1,6 @@ use bytes::{Buf, BufMut}; use futures::future::{self, FutureResult}; -use futures::{Async, Future, Poll}; +use futures::{try_ready, Async, Future, Poll}; use std::error::Error; use std::fmt; use std::io::{self, Read, Write}; @@ -27,7 +27,7 @@ impl ChannelBinding { pub trait TlsMode { type Stream: AsyncRead + AsyncWrite; - type Error: Into>; + type Error: Into>; type Future: Future; fn request_tls(&self) -> bool; @@ -37,7 +37,7 @@ pub trait TlsMode { pub trait TlsConnect { type Stream: AsyncRead + AsyncWrite; - type Error: Into>; + type Error: Into>; type Future: Future; fn connect(self, stream: S) -> Self::Future; @@ -212,7 +212,7 @@ where T: TlsConnect, { type Stream = T::Stream; - type Error = Box; + type Error = Box; type Future = RequireTlsFuture; fn request_tls(&self) -> bool { @@ -234,7 +234,7 @@ where pub struct TlsUnsupportedError(()); impl fmt::Display for TlsUnsupportedError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.write_str("TLS was required but not supported by the server") } } @@ -242,18 +242,18 @@ impl fmt::Display for TlsUnsupportedError { impl Error for TlsUnsupportedError {} pub struct RequireTlsFuture { - f: Option>>, + f: Option>>, } impl Future for RequireTlsFuture where T: Future, - T::Error: Into>, + T::Error: Into>, { type Item = T::Item; - type Error = Box; + type Error = Box; - fn poll(&mut self) -> Poll> { + fn poll(&mut self) -> Poll> { match self.f.take().expect("future polled after completion") { Ok(mut f) => match f.poll().map_err(Into::into)? 
{ Async::Ready(r) => Ok(Async::Ready(r)), diff --git a/tokio-postgres/src/types/bit_vec.rs b/tokio-postgres/src/types/bit_vec.rs index cd3f4743c..5aeeae54c 100644 --- a/tokio-postgres/src/types/bit_vec.rs +++ b/tokio-postgres/src/types/bit_vec.rs @@ -1,6 +1,4 @@ -extern crate bit_vec; - -use self::bit_vec::BitVec; +use bit_vec::BitVec; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/chrono.rs b/tokio-postgres/src/types/chrono.rs index 2ab7da3b2..5d5e7ce0f 100644 --- a/tokio-postgres/src/types/chrono.rs +++ b/tokio-postgres/src/types/chrono.rs @@ -1,6 +1,4 @@ -extern crate chrono; - -use self::chrono::{ +use chrono::{ DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc, }; use postgres_protocol::types; diff --git a/tokio-postgres/src/types/eui48.rs b/tokio-postgres/src/types/eui48.rs index a4e1bb6b9..37406466a 100644 --- a/tokio-postgres/src/types/eui48.rs +++ b/tokio-postgres/src/types/eui48.rs @@ -1,6 +1,4 @@ -extern crate eui48; - -use self::eui48::MacAddress; +use eui48::MacAddress; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/geo.rs b/tokio-postgres/src/types/geo.rs index 82f9d317e..40983a2fa 100644 --- a/tokio-postgres/src/types/geo.rs +++ b/tokio-postgres/src/types/geo.rs @@ -1,6 +1,4 @@ -extern crate geo; - -use self::geo::{Coordinate, LineString, Point, Rect}; +use geo::{Coordinate, LineString, Point, Rect}; use fallible_iterator::FallibleIterator; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 9fcfef277..881a5dfc0 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -10,12 +10,12 @@ use std::fmt; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use types::type_gen::{Inner, Other}; +use crate::types::type_gen::{Inner, Other}; #[doc(inline)] pub use postgres_protocol::Oid; -pub use types::special::{Date, Timestamp}; +pub use crate::types::special::{Date, Timestamp}; // Number of seconds from 1970-01-01 to 2000-01-01 const TIME_SEC_CONVERSION: u64 = 946684800; @@ -46,7 +46,7 @@ macro_rules! to_sql_checked { ty: &$crate::types::Type, out: &mut ::std::vec::Vec) -> ::std::result::Result<$crate::types::IsNull, - Box<::std::error::Error + + Box> { $crate::types::__to_sql_checked(self, ty, out) @@ -61,7 +61,7 @@ pub fn __to_sql_checked( v: &T, ty: &Type, out: &mut Vec, -) -> Result> +) -> Result> where T: ToSql, { @@ -95,7 +95,7 @@ pub use self::serde_json::Json; pub struct Type(Inner); impl fmt::Display for Type { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self.schema() { "public" | "pg_catalog" => {} schema => write!(fmt, "{}.", schema)?, @@ -202,7 +202,7 @@ impl Field { pub struct WasNull; impl fmt::Display for WasNull { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.write_str(self.description()) } } @@ -219,7 +219,7 @@ impl Error for WasNull { pub struct WrongType(Type); impl fmt::Display for WrongType { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!( fmt, "cannot convert to or from a Postgres value of type `{}`", @@ -301,7 +301,7 @@ pub trait FromSql<'a>: Sized { /// /// The caller of this method is responsible for ensuring that this type /// is compatible with the Postgres `Type`. 
- fn from_sql(ty: &Type, raw: &'a [u8]) -> Result>; + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result>; /// Creates a new value of this type from a `NULL` SQL value. /// @@ -311,7 +311,7 @@ pub trait FromSql<'a>: Sized { /// The default implementation returns /// `Err(Box::new(WasNull))`. #[allow(unused_variables)] - fn from_sql_null(ty: &Type) -> Result> { + fn from_sql_null(ty: &Type) -> Result> { Err(Box::new(WasNull)) } @@ -320,7 +320,7 @@ pub trait FromSql<'a>: Sized { fn from_sql_nullable( ty: &Type, raw: Option<&'a [u8]>, - ) -> Result> { + ) -> Result> { match raw { Some(raw) => Self::from_sql(ty, raw), None => Self::from_sql_null(ty), @@ -340,11 +340,11 @@ pub trait FromSqlOwned: for<'a> FromSql<'a> {} impl FromSqlOwned for T where T: for<'a> FromSql<'a> {} impl<'a, T: FromSql<'a>> FromSql<'a> for Option { - fn from_sql(ty: &Type, raw: &'a [u8]) -> Result, Box> { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result, Box> { ::from_sql(ty, raw).map(Some) } - fn from_sql_null(_: &Type) -> Result, Box> { + fn from_sql_null(_: &Type) -> Result, Box> { Ok(None) } @@ -354,7 +354,7 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Option { } impl<'a, T: FromSql<'a>> FromSql<'a> for Vec { - fn from_sql(ty: &Type, raw: &'a [u8]) -> Result, Box> { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result, Box> { let member_type = match *ty.kind() { Kind::Array(ref member) => member, _ => panic!("expected array type"), @@ -380,7 +380,7 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Vec { } impl<'a> FromSql<'a> for Vec { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result, Box> { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result, Box> { Ok(types::bytea_from_sql(raw).to_owned()) } @@ -388,7 +388,7 @@ impl<'a> FromSql<'a> for Vec { } impl<'a> FromSql<'a> for &'a [u8] { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result<&'a [u8], Box> { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result<&'a [u8], Box> { Ok(types::bytea_from_sql(raw)) } @@ -396,7 +396,7 @@ impl<'a> FromSql<'a> for &'a [u8] { } impl<'a> FromSql<'a> for String { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { types::text_from_sql(raw).map(|b| b.to_owned()) } @@ -406,7 +406,7 @@ impl<'a> FromSql<'a> for String { } impl<'a> FromSql<'a> for &'a str { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { types::text_from_sql(raw) } @@ -422,7 +422,7 @@ impl<'a> FromSql<'a> for &'a str { macro_rules! simple_from { ($t:ty, $f:ident, $($expected:ident),+) => { impl<'a> FromSql<'a> for $t { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result<$t, Box> { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result<$t, Box> { types::$f(raw) } @@ -444,7 +444,7 @@ impl<'a> FromSql<'a> for HashMap> { fn from_sql( _: &Type, raw: &'a [u8], - ) -> Result>, Box> { + ) -> Result>, Box> { types::hstore_from_sql(raw)? .map(|(k, v)| (k.to_owned(), v.map(str::to_owned))) .collect() @@ -456,7 +456,7 @@ impl<'a> FromSql<'a> for HashMap> { } impl<'a> FromSql<'a> for SystemTime { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { let time = types::timestamp_from_sql(raw)?; let epoch = UNIX_EPOCH + Duration::from_secs(TIME_SEC_CONVERSION); @@ -550,7 +550,7 @@ pub trait ToSql: fmt::Debug { /// The return value indicates if this value should be represented as /// `NULL`. If this is the case, implementations **must not** write /// anything to `out`. 
- fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> + fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> where Self: Sized; @@ -568,14 +568,14 @@ pub trait ToSql: fmt::Debug { &self, ty: &Type, out: &mut Vec, - ) -> Result>; + ) -> Result>; } impl<'a, T> ToSql for &'a T where T: ToSql, { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { (*self).to_sql(ty, out) } @@ -587,7 +587,7 @@ where } impl ToSql for Option { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { match *self { Some(ref val) => val.to_sql(ty, out), None => Ok(IsNull::Yes), @@ -602,7 +602,7 @@ impl ToSql for Option { } impl<'a, T: ToSql> ToSql for &'a [T] { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { let member_type = match *ty.kind() { Kind::Array(ref member) => member, _ => panic!("expected array type"), @@ -637,7 +637,7 @@ impl<'a, T: ToSql> ToSql for &'a [T] { } impl<'a> ToSql for &'a [u8] { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { types::bytea_to_sql(*self, w); Ok(IsNull::No) } @@ -648,7 +648,7 @@ impl<'a> ToSql for &'a [u8] { } impl ToSql for Vec { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { <&[T] as ToSql>::to_sql(&&**self, ty, w) } @@ -660,7 +660,7 @@ impl ToSql for Vec { } impl ToSql for Vec { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { <&[u8] as ToSql>::to_sql(&&**self, ty, w) } @@ -672,7 +672,7 @@ impl ToSql for Vec { } impl<'a> ToSql for &'a str { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { types::text_to_sql(*self, w); Ok(IsNull::No) } @@ -689,7 +689,7 @@ impl<'a> ToSql for &'a str { } impl<'a> ToSql for Cow<'a, str> { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { <&str as ToSql>::to_sql(&&self.as_ref(), ty, w) } @@ -701,7 +701,7 @@ impl<'a> ToSql for Cow<'a, str> { } impl ToSql for String { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { <&str as ToSql>::to_sql(&&**self, ty, w) } @@ -718,7 +718,7 @@ macro_rules! 
simple_to { fn to_sql(&self, _: &Type, w: &mut Vec) - -> Result> { + -> Result> { types::$f(*self, w); Ok(IsNull::No) } @@ -740,7 +740,7 @@ simple_to!(f32, float4_to_sql, FLOAT4); simple_to!(f64, float8_to_sql, FLOAT8); impl ToSql for HashMap> { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { types::hstore_to_sql( self.iter().map(|(k, v)| (&**k, v.as_ref().map(|v| &**v))), w, @@ -756,7 +756,7 @@ impl ToSql for HashMap> { } impl ToSql for SystemTime { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { let epoch = UNIX_EPOCH + Duration::from_secs(TIME_SEC_CONVERSION); let to_usec = @@ -776,7 +776,7 @@ impl ToSql for SystemTime { to_sql_checked!(); } -fn downcast(len: usize) -> Result> { +fn downcast(len: usize) -> Result> { if len > i32::max_value() as usize { Err("value too large to transmit".into()) } else { diff --git a/tokio-postgres/src/types/serde_json.rs b/tokio-postgres/src/types/serde_json.rs index 9544f6d24..0b9338303 100644 --- a/tokio-postgres/src/types/serde_json.rs +++ b/tokio-postgres/src/types/serde_json.rs @@ -1,8 +1,5 @@ -extern crate serde; -extern crate serde_json; - -use self::serde::{Deserialize, Serialize}; -use self::serde_json::Value; +use serde::{Deserialize, Serialize}; +use serde_json::Value; use std::error::Error; use std::fmt::Debug; use std::io::Read; diff --git a/tokio-postgres/src/types/special.rs b/tokio-postgres/src/types/special.rs index a0566319e..15538f7a3 100644 --- a/tokio-postgres/src/types/special.rs +++ b/tokio-postgres/src/types/special.rs @@ -2,7 +2,7 @@ use postgres_protocol::types; use std::error::Error; use std::{i32, i64}; -use types::{FromSql, IsNull, ToSql, Type}; +use crate::types::{FromSql, IsNull, ToSql, Type}; /// A wrapper that can be used to represent infinity with `Type::Date` types. #[derive(Debug, Clone, Copy, PartialEq)] @@ -16,7 +16,7 @@ pub enum Date { } impl<'a, T: FromSql<'a>> FromSql<'a> for Date { - fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { match types::date_from_sql(raw)? { i32::MAX => Ok(Date::PosInfinity), i32::MIN => Ok(Date::NegInfinity), @@ -30,7 +30,7 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Date { } impl ToSql for Date { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { let value = match *self { Date::PosInfinity => i32::MAX, Date::NegInfinity => i32::MIN, @@ -61,7 +61,7 @@ pub enum Timestamp { } impl<'a, T: FromSql<'a>> FromSql<'a> for Timestamp { - fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { match types::timestamp_from_sql(raw)? 
{ i64::MAX => Ok(Timestamp::PosInfinity), i64::MIN => Ok(Timestamp::NegInfinity), @@ -78,7 +78,7 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Timestamp { } impl ToSql for Timestamp { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { let value = match *self { Timestamp::PosInfinity => i64::MAX, Timestamp::NegInfinity => i64::MIN, diff --git a/tokio-postgres/src/types/type_gen.rs b/tokio-postgres/src/types/type_gen.rs index 4ce0c5fba..7dd28f42e 100644 --- a/tokio-postgres/src/types/type_gen.rs +++ b/tokio-postgres/src/types/type_gen.rs @@ -1,7 +1,7 @@ // Autogenerated file - DO NOT EDIT use std::sync::Arc; -use types::{Kind, Oid, Type}; +use crate::types::{Kind, Oid, Type}; #[derive(PartialEq, Eq, Debug)] pub struct Other { diff --git a/tokio-postgres/src/types/uuid.rs b/tokio-postgres/src/types/uuid.rs index d7a190d01..2e34d01c3 100644 --- a/tokio-postgres/src/types/uuid.rs +++ b/tokio-postgres/src/types/uuid.rs @@ -1,6 +1,4 @@ -extern crate uuid; - -use self::uuid::Uuid; +use uuid::Uuid; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/tests/test.rs b/tokio-postgres/tests/test.rs index 6efa2ddce..06d85d648 100644 --- a/tokio-postgres/tests/test.rs +++ b/tokio-postgres/tests/test.rs @@ -1,15 +1,8 @@ -extern crate env_logger; -extern crate tokio; -extern crate tokio_postgres; +#![warn(rust_2018_idioms)] -#[macro_use] -extern crate futures; -#[macro_use] -extern crate log; - -use futures::future; -use futures::stream; use futures::sync::mpsc; +use futures::{future, stream, try_ready}; +use log::debug; use std::error::Error; use std::time::{Duration, Instant}; use tokio::net::TcpStream; @@ -244,7 +237,8 @@ fn query_portal() { "CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT); INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('charlie'); BEGIN;", - )).unwrap(); + )) + .unwrap(); let statement = runtime .block_on(client.prepare("SELECT id, name FROM foo ORDER BY id")) @@ -292,10 +286,12 @@ fn cancel_query() { .then(|r| { r.unwrap(); TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - }).then(|r| { + }) + .then(|r| { let s = r.unwrap(); tokio_postgres::cancel_query(s, NoTls, cancel_data) - }).then(|r| { + }) + .then(|r| { r.unwrap(); Ok::<(), ()>(()) }); @@ -321,7 +317,8 @@ fn custom_enum() { 'ok', 'happy' )", - )).unwrap(); + )) + .unwrap(); let select = client.prepare("SELECT $1::mood"); let select = runtime.block_on(select).unwrap(); @@ -352,7 +349,8 @@ fn custom_domain() { runtime .block_on(client.batch_execute( "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)", - )).unwrap(); + )) + .unwrap(); let select = client.prepare("SELECT $1::session_id"); let select = runtime.block_on(select).unwrap(); @@ -405,7 +403,8 @@ fn custom_composite() { supplier INTEGER, price NUMERIC )", - )).unwrap(); + )) + .unwrap(); let select = client.prepare("SELECT $1::inventory_item"); let select = runtime.block_on(select).unwrap(); @@ -442,7 +441,8 @@ fn custom_range() { subtype = float8, subtype_diff = float8mi )", - )).unwrap(); + )) + .unwrap(); let select = client.prepare("SELECT $1::floatrange"); let select = runtime.block_on(select).unwrap(); @@ -534,7 +534,8 @@ fn transaction_commit() { id SERIAL, name TEXT )", - )).unwrap(); + )) + .unwrap(); let f = client.batch_execute("INSERT INTO foo (name) VALUES ('steven')"); runtime.block_on(client.transaction().build(f)).unwrap(); @@ -544,7 +545,8 @@ fn transaction_commit() { client .prepare("SELECT name FROM foo") .and_then(|s| 
client.query(&s, &[]).collect()), - ).unwrap(); + ) + .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "steven"); @@ -567,12 +569,13 @@ fn transaction_abort() { id SERIAL, name TEXT )", - )).unwrap(); + )) + .unwrap(); let f = client .batch_execute("INSERT INTO foo (name) VALUES ('steven')") - .map_err(|e| Box::new(e) as Box) - .and_then(|_| Err::<(), _>(Box::::from(""))); + .map_err(|e| Box::new(e) as Box) + .and_then(|_| Err::<(), _>(Box::::from(""))); runtime.block_on(client.transaction().build(f)).unwrap_err(); let rows = runtime @@ -580,7 +583,8 @@ fn transaction_abort() { client .prepare("SELECT name FROM foo") .and_then(|s| client.query(&s, &[]).collect()), - ).unwrap(); + ) + .unwrap(); assert_eq!(rows.len(), 0); } @@ -602,7 +606,8 @@ fn copy_in() { id INTEGER, name TEXT )", - )).unwrap(); + )) + .unwrap(); let stream = stream::iter_ok::<_, String>(vec![b"1\tjim\n".to_vec(), b"2\tjoe\n".to_vec()]); let rows = runtime @@ -610,7 +615,8 @@ fn copy_in() { client .prepare("COPY foo FROM STDIN") .and_then(|s| client.copy_in(&s, &[], stream)), - ).unwrap(); + ) + .unwrap(); assert_eq!(rows, 2); let rows = runtime @@ -618,7 +624,8 @@ fn copy_in() { client .prepare("SELECT id, name FROM foo ORDER BY id") .and_then(|s| client.query(&s, &[]).collect()), - ).unwrap(); + ) + .unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -644,7 +651,8 @@ fn copy_in_error() { id INTEGER, name TEXT )", - )).unwrap(); + )) + .unwrap(); let stream = stream::iter_result(vec![Ok(b"1\tjim\n".to_vec()), Err("asdf")]); let error = runtime @@ -652,7 +660,8 @@ fn copy_in_error() { client .prepare("COPY foo FROM STDIN") .and_then(|s| client.copy_in(&s, &[], stream)), - ).unwrap_err(); + ) + .unwrap_err(); assert!(error.to_string().contains("asdf")); let rows = runtime @@ -660,7 +669,8 @@ fn copy_in_error() { client .prepare("SELECT id, name FROM foo ORDER BY id") .and_then(|s| client.query(&s, &[]).collect()), - ).unwrap(); + ) + .unwrap(); assert_eq!(rows.len(), 0); } @@ -683,14 +693,16 @@ fn copy_out() { name TEXT ); INSERT INTO foo (name) VALUES ('jim'), ('joe');", - )).unwrap(); + )) + .unwrap(); let data = runtime .block_on( client .prepare("COPY foo TO STDOUT") .and_then(|s| client.copy_out(&s, &[]).concat2()), - ).unwrap(); + ) + .unwrap(); assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); } @@ -719,7 +731,8 @@ fn transaction_builder_around_moved_client() { .prepare("INSERT INTO transaction_foo (name) VALUES ($1), ($2)") .map(|statement| (client, statement)) }) - }).and_then(|(mut client, statement)| { + }) + .and_then(|(mut client, statement)| { client .query(&statement, &[&"jim", &"joe"]) .collect() @@ -734,7 +747,8 @@ fn transaction_builder_around_moved_client() { client .prepare("COPY transaction_foo TO STDOUT") .and_then(|s| client.copy_out(&s, &[]).concat2()), - ).unwrap(); + ) + .unwrap(); assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); drop(client); From 6ff59acdd2969d53d4a509e0d21579fe7a230335 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 17:53:30 -0800 Subject: [PATCH 032/819] rustfmt --- codegen/src/sqlstate.rs | 9 ++++-- codegen/src/type_gen.rs | 42 ++++++++++++++++++--------- postgres-protocol/src/types.rs | 19 ++++++++---- tokio-postgres-native-tls/src/test.rs | 9 ++++-- tokio-postgres/src/proto/client.rs | 13 +++++++-- tokio-postgres/src/types/chrono.rs | 4 +-- tokio-postgres/src/types/geo.rs | 2 +- tokio-postgres/src/types/uuid.rs | 2 +- 8 files changed, 68 insertions(+), 32 deletions(-) diff --git a/codegen/src/sqlstate.rs 
b/codegen/src/sqlstate.rs index 791e47b11..a84d523ab 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -60,7 +60,8 @@ impl SqlState {{ &self.0 }} " - ).unwrap(); + ) + .unwrap(); } fn make_consts(codes: &LinkedHashMap>, file: &mut BufWriter) { @@ -74,7 +75,8 @@ fn make_consts(codes: &LinkedHashMap>, file: &mut BufWriter< "#, name = name, code = code, - ).unwrap(); + ) + .unwrap(); } } @@ -87,7 +89,8 @@ fn make_map(codes: &LinkedHashMap>, file: &mut BufWriter = " - ).unwrap(); + ) + .unwrap(); let mut builder = phf_codegen::Map::new(); for (code, names) in codes { builder.entry(&**code, &format!("SqlState::{}", &names[0])); diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 0f742d696..6c9c89a53 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -132,7 +132,8 @@ pub struct Other {{ pub schema: String, }} " - ).unwrap(); + ) + .unwrap(); } fn make_enum(w: &mut BufWriter, types: &BTreeMap) { @@ -141,7 +142,8 @@ fn make_enum(w: &mut BufWriter, types: &BTreeMap) { " #[derive(PartialEq, Eq, Clone, Debug)] pub enum Inner {{" - ).unwrap(); + ) + .unwrap(); for type_ in types.values() { write!( @@ -149,7 +151,8 @@ pub enum Inner {{" " {},", type_.variant - ).unwrap(); + ) + .unwrap(); } write!( @@ -159,7 +162,8 @@ pub enum Inner {{" }} " - ).unwrap(); + ) + .unwrap(); } fn make_impl(w: &mut BufWriter, types: &BTreeMap) { @@ -169,7 +173,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { pub fn from_oid(oid: Oid) -> Option {{ match oid {{ ", - ).unwrap(); + ) + .unwrap(); for (oid, type_) in types { write!( @@ -177,7 +182,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { " {} => Some(Inner::{}), ", oid, type_.variant - ).unwrap(); + ) + .unwrap(); } write!( @@ -189,7 +195,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { pub fn oid(&self) -> Oid {{ match *self {{ ", - ).unwrap(); + ) + .unwrap(); for (oid, type_) in types { write!( @@ -197,7 +204,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { " Inner::{} => {}, ", type_.variant, oid - ).unwrap(); + ) + .unwrap(); } write!( @@ -209,7 +217,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { pub fn kind(&self) -> &Kind {{ match *self {{ ", - ).unwrap(); + ) + .unwrap(); for type_ in types.values() { let kind = match type_.kind { @@ -227,7 +236,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { }} ", type_.variant, kind - ).unwrap(); + ) + .unwrap(); } write!( @@ -239,7 +249,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { pub fn name(&self) -> &str {{ match *self {{ "#, - ).unwrap(); + ) + .unwrap(); for type_ in types.values() { write!( @@ -247,7 +258,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { r#" Inner::{} => "{}", "#, type_.variant, type_.name - ).unwrap(); + ) + .unwrap(); } write!( @@ -257,7 +269,8 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { }} }} " - ).unwrap(); + ) + .unwrap(); } fn make_consts(w: &mut BufWriter, types: &BTreeMap) { @@ -272,7 +285,8 @@ fn make_consts(w: &mut BufWriter, types: &BTreeMap) { docs = type_.doc, ident = type_.ident, variant = type_.variant - ).unwrap(); + ) + .unwrap(); } write!(w, "}}").unwrap(); diff --git a/postgres-protocol/src/types.rs b/postgres-protocol/src/types.rs index b5d65cb33..7a764a9d5 100644 --- a/postgres-protocol/src/types.rs +++ b/postgres-protocol/src/types.rs @@ -167,7 +167,10 @@ pub fn float8_from_sql(mut buf: &[u8]) -> Result(values: I, buf: &mut Vec) -> Result<(), StdBox> +pub fn hstore_to_sql<'a, I>( + values: I, + buf: &mut Vec, +) -> Result<(), StdBox> where I: 
IntoIterator)>, { @@ -228,7 +231,9 @@ impl<'a> FallibleIterator for HstoreEntries<'a> { type Error = StdBox; #[inline] - fn next(&mut self) -> Result)>, StdBox> { + fn next( + &mut self, + ) -> Result)>, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { return Err("invalid buffer size".into()); @@ -288,7 +293,9 @@ where /// Deserializes a `VARBIT` or `BIT` value. #[inline] -pub fn varbit_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn varbit_from_sql<'a>( + mut buf: &'a [u8], +) -> Result, StdBox> { let len = buf.read_i32::()?; if len < 0 { return Err("invalid varbit length".into()); @@ -1053,7 +1060,8 @@ mod test { None => Ok(IsNull::Yes), }, &mut buf, - ).unwrap(); + ) + .unwrap(); let array = array_from_sql(&buf).unwrap(); assert_eq!(array.has_nulls(), true); @@ -1089,7 +1097,8 @@ mod test { None => Ok(IsNull::Yes), }, &mut buf, - ).unwrap(); + ) + .unwrap(); let array = array_from_sql(&buf).unwrap(); assert_eq!(array.has_nulls(), false); diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index 15371ad69..f98b563fb 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -38,7 +38,8 @@ fn require() { let connector = native_tls::TlsConnector::builder() .add_root_certificate( Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), - ).build() + ) + .build() .unwrap(); smoke_test( tokio_postgres::Builder::new() @@ -53,7 +54,8 @@ fn prefer() { let connector = native_tls::TlsConnector::builder() .add_root_certificate( Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), - ).build() + ) + .build() .unwrap(); smoke_test( tokio_postgres::Builder::new() @@ -68,7 +70,8 @@ fn scram_user() { let connector = native_tls::TlsConnector::builder() .add_root_certificate( Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), - ).build() + ) + .build() .unwrap(); smoke_test( tokio_postgres::Builder::new() diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index bae395ad2..1931773ba 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -161,7 +161,12 @@ impl Client { QueryStream::new(self.clone(), pending, portal.clone()) } - pub fn copy_in(&self, statement: &Statement, params: &[&dyn ToSql], stream: S) -> CopyInFuture + pub fn copy_in( + &self, + statement: &Statement, + params: &[&dyn ToSql], + stream: S, + ) -> CopyInFuture where S: Stream, S::Item: IntoBuf, @@ -236,7 +241,11 @@ impl Client { } } - fn excecute_message(&self, statement: &Statement, params: &[&dyn ToSql]) -> Result, Error> { + fn excecute_message( + &self, + statement: &Statement, + params: &[&dyn ToSql], + ) -> Result, Error> { let mut buf = self.bind_message(statement, "", params)?; frontend::execute("", 0, &mut buf).map_err(Error::parse)?; frontend::sync(&mut buf); diff --git a/tokio-postgres/src/types/chrono.rs b/tokio-postgres/src/types/chrono.rs index 5d5e7ce0f..ed8a5c146 100644 --- a/tokio-postgres/src/types/chrono.rs +++ b/tokio-postgres/src/types/chrono.rs @@ -1,6 +1,4 @@ -use chrono::{ - DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc, -}; +use chrono::{DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc}; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/geo.rs b/tokio-postgres/src/types/geo.rs index 40983a2fa..95c0603c6 100644 --- a/tokio-postgres/src/types/geo.rs +++ 
b/tokio-postgres/src/types/geo.rs @@ -1,5 +1,5 @@ -use geo::{Coordinate, LineString, Point, Rect}; use fallible_iterator::FallibleIterator; +use geo::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/uuid.rs b/tokio-postgres/src/types/uuid.rs index 2e34d01c3..e68acf977 100644 --- a/tokio-postgres/src/types/uuid.rs +++ b/tokio-postgres/src/types/uuid.rs @@ -1,6 +1,6 @@ -use uuid::Uuid; use postgres_protocol::types; use std::error::Error; +use uuid::Uuid; use types::{FromSql, IsNull, ToSql, Type}; From 69a016fd85f0848037a93321bd7959df985c8346 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 18:01:03 -0800 Subject: [PATCH 033/819] Rename optional crate imports --- codegen/src/type_gen.rs | 2 +- tokio-postgres/Cargo.toml | 26 ++++++++--------- .../src/types/{bit_vec.rs => bit_vec_05.rs} | 8 +++--- .../src/types/{chrono.rs => chrono_04.rs} | 28 +++++++++---------- .../src/types/{eui48.rs => eui48_03.rs} | 8 +++--- .../src/types/{geo.rs => geo_010.rs} | 16 +++++------ tokio-postgres/src/types/mod.rs | 14 +++++----- .../types/{serde_json.rs => serde_json_1.rs} | 18 ++++++------ .../src/types/{uuid.rs => uuid_06.rs} | 8 +++--- 9 files changed, 64 insertions(+), 64 deletions(-) rename tokio-postgres/src/types/{bit_vec.rs => bit_vec_05.rs} (74%) rename tokio-postgres/src/types/{chrono.rs => chrono_04.rs} (82%) rename tokio-postgres/src/types/{eui48.rs => eui48_03.rs} (80%) rename tokio-postgres/src/types/{geo.rs => geo_010.rs} (78%) rename tokio-postgres/src/types/{serde_json.rs => serde_json_1.rs} (75%) rename tokio-postgres/src/types/{uuid.rs => uuid_06.rs} (69%) diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 6c9c89a53..5fe0370ac 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -5,7 +5,7 @@ use std::fs::File; use std::io::{BufWriter, Write}; use std::path::Path; -use snake_to_camel; +use crate::snake_to_camel; const PG_TYPE_H: &'static str = include_str!("pg_type.h"); const PG_RANGE_H: &'static str = include_str!("pg_range.h"); diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index ea8f66590..8f8b591f2 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -24,12 +24,12 @@ features = [ circle-ci = { repository = "sfackler/rust-postgres" } [features] -"with-bit-vec-0.5" = ["bit-vec"] -"with-chrono-0.4" = ["chrono"] -"with-eui48-0.3" = ["eui48"] -"with-geo-0.10" = ["geo"] -with-serde_json-1 = ["serde", "serde_json"] -"with-uuid-0.6" = ["uuid"] +"with-bit-vec-0.5" = ["bit-vec-05"] +"with-chrono-0.4" = ["chrono-04"] +"with-eui48-0.3" = ["eui48-03"] +"with-geo-0.10" = ["geo-010"] +with-serde_json-1 = ["serde-1", "serde_json-1"] +"with-uuid-0.6" = ["uuid-06"] [dependencies] antidote = "1.0" @@ -45,13 +45,13 @@ tokio-codec = "0.1" tokio-io = "0.1" void = "1.0" -bit-vec = { version = "0.5", optional = true } -chrono = { version = "0.4", optional = true } -eui48 = { version = "0.3", optional = true } -geo = { version = "0.10", optional = true } -serde = { version = "1.0", optional = true } -serde_json = { version = "1.0", optional = true } -uuid = { version = "0.6", optional = true } +bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } +chrono-04 = { version = "0.4", package = "chrono", optional = true } +eui48-03 = { version = "0.3", package = "eui48", optional = true } +geo-010 = { version = "0.10", package = "geo", optional = true } +serde-1 = { version = "1.0", package = "serde", optional = true } 
+serde_json-1 = { version = "1.0", package = "serde_json", optional = true } +uuid-06 = { version = "0.6", package = "uuid", optional = true } [dev-dependencies] tokio = "0.1.7" diff --git a/tokio-postgres/src/types/bit_vec.rs b/tokio-postgres/src/types/bit_vec_05.rs similarity index 74% rename from tokio-postgres/src/types/bit_vec.rs rename to tokio-postgres/src/types/bit_vec_05.rs index 5aeeae54c..4a0cdb91c 100644 --- a/tokio-postgres/src/types/bit_vec.rs +++ b/tokio-postgres/src/types/bit_vec_05.rs @@ -1,11 +1,11 @@ -use bit_vec::BitVec; +use bit_vec_05::BitVec; use postgres_protocol::types; use std::error::Error; -use types::{FromSql, IsNull, ToSql, Type}; +use crate::types::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for BitVec { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let varbit = types::varbit_from_sql(raw)?; let mut bitvec = BitVec::from_bytes(varbit.bytes()); while bitvec.len() > varbit.len() { @@ -19,7 +19,7 @@ impl<'a> FromSql<'a> for BitVec { } impl ToSql for BitVec { - fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { types::varbit_to_sql(self.len(), self.to_bytes().into_iter(), out)?; Ok(IsNull::No) } diff --git a/tokio-postgres/src/types/chrono.rs b/tokio-postgres/src/types/chrono_04.rs similarity index 82% rename from tokio-postgres/src/types/chrono.rs rename to tokio-postgres/src/types/chrono_04.rs index ed8a5c146..212ba6ee0 100644 --- a/tokio-postgres/src/types/chrono.rs +++ b/tokio-postgres/src/types/chrono_04.rs @@ -1,15 +1,15 @@ -use chrono::{DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc}; +use chrono_04::{DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc}; use postgres_protocol::types; use std::error::Error; -use types::{FromSql, IsNull, ToSql, Type}; +use crate::types::{FromSql, IsNull, ToSql, Type}; fn base() -> NaiveDateTime { NaiveDate::from_ymd(2000, 1, 1).and_hms(0, 0, 0) } impl<'a> FromSql<'a> for NaiveDateTime { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let t = types::timestamp_from_sql(raw)?; Ok(base() + Duration::microseconds(t)) } @@ -18,7 +18,7 @@ impl<'a> FromSql<'a> for NaiveDateTime { } impl ToSql for NaiveDateTime { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { let time = match self.signed_duration_since(base()).num_microseconds() { Some(time) => time, None => return Err("value too large to transmit".into()), @@ -32,7 +32,7 @@ impl ToSql for NaiveDateTime { } impl<'a> FromSql<'a> for DateTime { - fn from_sql(type_: &Type, raw: &[u8]) -> Result, Box> { + fn from_sql(type_: &Type, raw: &[u8]) -> Result, Box> { let naive = NaiveDateTime::from_sql(type_, raw)?; Ok(DateTime::from_utc(naive, Utc)) } @@ -41,7 +41,7 @@ impl<'a> FromSql<'a> for DateTime { } impl ToSql for DateTime { - fn to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { self.naive_utc().to_sql(type_, w) } @@ -50,7 +50,7 @@ impl ToSql for DateTime { } impl<'a> FromSql<'a> for DateTime { - fn from_sql(type_: &Type, raw: &[u8]) -> Result, Box> { + fn from_sql(type_: &Type, raw: &[u8]) -> Result, Box> { let utc = DateTime::::from_sql(type_, raw)?; Ok(utc.with_timezone(&Local)) } @@ -59,7 +59,7 @@ impl<'a> FromSql<'a> for DateTime { } impl ToSql for DateTime { - fn to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { + fn 
to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { self.with_timezone(&Utc).to_sql(type_, w) } @@ -71,7 +71,7 @@ impl<'a> FromSql<'a> for DateTime { fn from_sql( type_: &Type, raw: &[u8], - ) -> Result, Box> { + ) -> Result, Box> { let utc = DateTime::::from_sql(type_, raw)?; Ok(utc.with_timezone(&FixedOffset::east(0))) } @@ -80,7 +80,7 @@ impl<'a> FromSql<'a> for DateTime { } impl ToSql for DateTime { - fn to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { self.with_timezone(&Utc).to_sql(type_, w) } @@ -89,7 +89,7 @@ impl ToSql for DateTime { } impl<'a> FromSql<'a> for NaiveDate { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let jd = types::date_from_sql(raw)?; Ok(base().date() + Duration::days(jd as i64)) } @@ -98,7 +98,7 @@ impl<'a> FromSql<'a> for NaiveDate { } impl ToSql for NaiveDate { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { let jd = self.signed_duration_since(base().date()).num_days(); if jd > i32::max_value() as i64 || jd < i32::min_value() as i64 { return Err("value too large to transmit".into()); @@ -113,7 +113,7 @@ impl ToSql for NaiveDate { } impl<'a> FromSql<'a> for NaiveTime { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let usec = types::time_from_sql(raw)?; Ok(NaiveTime::from_hms(0, 0, 0) + Duration::microseconds(usec)) } @@ -122,7 +122,7 @@ impl<'a> FromSql<'a> for NaiveTime { } impl ToSql for NaiveTime { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { let delta = self.signed_duration_since(NaiveTime::from_hms(0, 0, 0)); let time = match delta.num_microseconds() { Some(time) => time, diff --git a/tokio-postgres/src/types/eui48.rs b/tokio-postgres/src/types/eui48_03.rs similarity index 80% rename from tokio-postgres/src/types/eui48.rs rename to tokio-postgres/src/types/eui48_03.rs index 37406466a..0ffa7f6aa 100644 --- a/tokio-postgres/src/types/eui48.rs +++ b/tokio-postgres/src/types/eui48_03.rs @@ -1,11 +1,11 @@ -use eui48::MacAddress; +use eui48_03::MacAddress; use postgres_protocol::types; use std::error::Error; -use types::{FromSql, IsNull, ToSql, Type}; +use crate::types::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for MacAddress { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let bytes = types::macaddr_from_sql(raw)?; Ok(MacAddress::new(bytes)) } @@ -14,7 +14,7 @@ impl<'a> FromSql<'a> for MacAddress { } impl ToSql for MacAddress { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { let mut bytes = [0; 6]; bytes.copy_from_slice(self.as_bytes()); types::macaddr_to_sql(bytes, w); diff --git a/tokio-postgres/src/types/geo.rs b/tokio-postgres/src/types/geo_010.rs similarity index 78% rename from tokio-postgres/src/types/geo.rs rename to tokio-postgres/src/types/geo_010.rs index 95c0603c6..cb44d426c 100644 --- a/tokio-postgres/src/types/geo.rs +++ b/tokio-postgres/src/types/geo_010.rs @@ -1,12 +1,12 @@ use fallible_iterator::FallibleIterator; -use geo::{Coordinate, LineString, Point, Rect}; +use geo_010::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; -use types::{FromSql, IsNull, ToSql, Type}; +use crate::types::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for Point { - fn from_sql(_: &Type, raw: &[u8]) -> 
Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let point = types::point_from_sql(raw)?; Ok(Point::new(point.x(), point.y())) } @@ -15,7 +15,7 @@ impl<'a> FromSql<'a> for Point { } impl ToSql for Point { - fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { types::point_to_sql(self.x(), self.y(), out); Ok(IsNull::No) } @@ -25,7 +25,7 @@ impl ToSql for Point { } impl<'a> FromSql<'a> for Rect { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let rect = types::box_from_sql(raw)?; Ok(Rect { min: Coordinate { @@ -43,7 +43,7 @@ impl<'a> FromSql<'a> for Rect { } impl ToSql for Rect { - fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { types::box_to_sql(self.min.x, self.min.y, self.max.x, self.max.y, out); Ok(IsNull::No) } @@ -53,7 +53,7 @@ impl ToSql for Rect { } impl<'a> FromSql<'a> for LineString { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let path = types::path_from_sql(raw)?; let points = path .points() @@ -66,7 +66,7 @@ impl<'a> FromSql<'a> for LineString { } impl ToSql for LineString { - fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { let closed = false; // always encode an open path from LineString types::path_to_sql(closed, self.0.iter().map(|p| (p.x, p.y)), out)?; Ok(IsNull::No) diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 881a5dfc0..8726d04a5 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -72,23 +72,23 @@ where } #[cfg(feature = "with-bit-vec-0.5")] -mod bit_vec; +mod bit_vec_05; #[cfg(feature = "with-chrono-0.4")] -mod chrono; +mod chrono_04; #[cfg(feature = "with-eui48-0.3")] -mod eui48; +mod eui48_03; #[cfg(feature = "with-geo-0.10")] -mod geo; +mod geo_010; #[cfg(feature = "with-serde_json-1")] -mod serde_json; +mod serde_json_1; #[cfg(feature = "with-uuid-0.6")] -mod uuid; +mod uuid_06; mod special; mod type_gen; #[cfg(feature = "with-serde_json-1")] -pub use self::serde_json::Json; +pub use crate::types::serde_json_1::Json; /// A Postgres type. 
#[derive(PartialEq, Eq, Clone, Debug)] diff --git a/tokio-postgres/src/types/serde_json.rs b/tokio-postgres/src/types/serde_json_1.rs similarity index 75% rename from tokio-postgres/src/types/serde_json.rs rename to tokio-postgres/src/types/serde_json_1.rs index 0b9338303..58860f45d 100644 --- a/tokio-postgres/src/types/serde_json.rs +++ b/tokio-postgres/src/types/serde_json_1.rs @@ -1,10 +1,10 @@ -use serde::{Deserialize, Serialize}; -use serde_json::Value; +use serde_1::{Deserialize, Serialize}; +use serde_json_1::Value; use std::error::Error; use std::fmt::Debug; use std::io::Read; -use types::{FromSql, IsNull, ToSql, Type}; +use crate::types::{FromSql, IsNull, ToSql, Type}; #[derive(Debug)] pub struct Json(pub T); @@ -13,7 +13,7 @@ impl<'a, T> FromSql<'a> for Json where T: Deserialize<'a>, { - fn from_sql(ty: &Type, mut raw: &'a [u8]) -> Result, Box> { + fn from_sql(ty: &Type, mut raw: &'a [u8]) -> Result, Box> { if *ty == Type::JSONB { let mut b = [0; 1]; raw.read_exact(&mut b)?; @@ -22,7 +22,7 @@ where return Err("unsupported JSONB encoding version".into()); } } - serde_json::de::from_slice(raw) + serde_json_1::de::from_slice(raw) .map(Json) .map_err(Into::into) } @@ -34,11 +34,11 @@ impl ToSql for Json where T: Serialize + Debug, { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { if *ty == Type::JSONB { out.push(1); } - serde_json::ser::to_writer(out, &self.0)?; + serde_json_1::ser::to_writer(out, &self.0)?; Ok(IsNull::No) } @@ -47,7 +47,7 @@ where } impl<'a> FromSql<'a> for Value { - fn from_sql(ty: &Type, raw: &[u8]) -> Result> { + fn from_sql(ty: &Type, raw: &[u8]) -> Result> { Json::::from_sql(ty, raw).map(|json| json.0) } @@ -55,7 +55,7 @@ impl<'a> FromSql<'a> for Value { } impl ToSql for Value { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { Json(self).to_sql(ty, out) } diff --git a/tokio-postgres/src/types/uuid.rs b/tokio-postgres/src/types/uuid_06.rs similarity index 69% rename from tokio-postgres/src/types/uuid.rs rename to tokio-postgres/src/types/uuid_06.rs index e68acf977..07ef0adef 100644 --- a/tokio-postgres/src/types/uuid.rs +++ b/tokio-postgres/src/types/uuid_06.rs @@ -1,11 +1,11 @@ use postgres_protocol::types; use std::error::Error; -use uuid::Uuid; +use uuid_06::Uuid; -use types::{FromSql, IsNull, ToSql, Type}; +use crate::types::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for Uuid { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { let bytes = types::uuid_from_sql(raw)?; Ok(Uuid::from_bytes(&bytes).unwrap()) } @@ -14,7 +14,7 @@ impl<'a> FromSql<'a> for Uuid { } impl ToSql for Uuid { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { types::uuid_to_sql(*self.as_bytes(), w); Ok(IsNull::No) } From 12345a73b01c0320d0c66472315281f82ebc42a0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 18:09:19 -0800 Subject: [PATCH 034/819] Upgrade uuid and eui48 --- tokio-postgres/Cargo.toml | 8 ++++---- tokio-postgres/src/types/{eui48_03.rs => eui48_04.rs} | 2 +- tokio-postgres/src/types/mod.rs | 8 ++++---- tokio-postgres/src/types/{uuid_06.rs => uuid_07.rs} | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) rename tokio-postgres/src/types/{eui48_03.rs => eui48_04.rs} (96%) rename tokio-postgres/src/types/{uuid_06.rs => uuid_07.rs} (89%) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml 
index 8f8b591f2..68e184cc2 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -26,10 +26,10 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] "with-bit-vec-0.5" = ["bit-vec-05"] "with-chrono-0.4" = ["chrono-04"] -"with-eui48-0.3" = ["eui48-03"] +"with-eui48-0.4" = ["eui48-04"] "with-geo-0.10" = ["geo-010"] with-serde_json-1 = ["serde-1", "serde_json-1"] -"with-uuid-0.6" = ["uuid-06"] +"with-uuid-0.7" = ["uuid-07"] [dependencies] antidote = "1.0" @@ -47,11 +47,11 @@ void = "1.0" bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } -eui48-03 = { version = "0.3", package = "eui48", optional = true } +eui48-04 = { version = "0.4", package = "eui48", optional = true } geo-010 = { version = "0.10", package = "geo", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } -uuid-06 = { version = "0.6", package = "uuid", optional = true } +uuid-07 = { version = "0.7", package = "uuid", optional = true } [dev-dependencies] tokio = "0.1.7" diff --git a/tokio-postgres/src/types/eui48_03.rs b/tokio-postgres/src/types/eui48_04.rs similarity index 96% rename from tokio-postgres/src/types/eui48_03.rs rename to tokio-postgres/src/types/eui48_04.rs index 0ffa7f6aa..d0a67f8a4 100644 --- a/tokio-postgres/src/types/eui48_03.rs +++ b/tokio-postgres/src/types/eui48_04.rs @@ -1,4 +1,4 @@ -use eui48_03::MacAddress; +use eui48_04::MacAddress; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 8726d04a5..6b212ea77 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -75,14 +75,14 @@ where mod bit_vec_05; #[cfg(feature = "with-chrono-0.4")] mod chrono_04; -#[cfg(feature = "with-eui48-0.3")] -mod eui48_03; +#[cfg(feature = "with-eui48-0.4")] +mod eui48_04; #[cfg(feature = "with-geo-0.10")] mod geo_010; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; -#[cfg(feature = "with-uuid-0.6")] -mod uuid_06; +#[cfg(feature = "with-uuid-0.7")] +mod uuid_07; mod special; mod type_gen; diff --git a/tokio-postgres/src/types/uuid_06.rs b/tokio-postgres/src/types/uuid_07.rs similarity index 89% rename from tokio-postgres/src/types/uuid_06.rs rename to tokio-postgres/src/types/uuid_07.rs index 07ef0adef..c78d8a726 100644 --- a/tokio-postgres/src/types/uuid_06.rs +++ b/tokio-postgres/src/types/uuid_07.rs @@ -1,13 +1,13 @@ use postgres_protocol::types; use std::error::Error; -use uuid_06::Uuid; +use uuid_07::Uuid; use crate::types::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for Uuid { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let bytes = types::uuid_from_sql(raw)?; - Ok(Uuid::from_bytes(&bytes).unwrap()) + Ok(Uuid::from_bytes(bytes)) } accepts!(UUID); From 6300e0c0c9e1bcc3198d474747676cb9d5672b45 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 8 Dec 2018 18:09:46 -0800 Subject: [PATCH 035/819] Fix all-features tests --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9ea844988..2cb94e20b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -34,5 +34,5 @@ jobs: - run: rustc --version > ~/rust-version - *RESTORE_DEPS - run: cargo test --all - - run: cargo test -p tokio-postgres --all-features + - run: cargo test --manifest-path tokio-postgres/Cargo.toml 
--all-features - *SAVE_DEPS From 9f7ccebd8b812610c62d928a6d2c2fef64846f10 Mon Sep 17 00:00:00 2001 From: Damir Vandic Date: Sun, 9 Dec 2018 17:13:14 +0100 Subject: [PATCH 036/819] Update features for meta data docs.rs --- tokio-postgres/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 68e184cc2..00a09630e 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -14,10 +14,10 @@ categories = ["database"] features = [ "with-bit-vec-0.5", "with-chrono-0.4", - "with-eui48-0.3", + "with-eui48-0.4", "with-geo-0.10", "with-serde_json-1", - "with-uuid-0.6", + "with-uuid-0.7", ] [badges] From 0db96ff62e649491d06ff8150aa2d36fbee4583c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 9 Dec 2018 16:18:21 -0800 Subject: [PATCH 037/819] Add back type tests --- tokio-postgres/tests/test.rs | 2 + tokio-postgres/tests/types/bit_vec_07.rs | 29 + tokio-postgres/tests/types/chrono_04.rs | 148 +++++ tokio-postgres/tests/types/eui48_04.rs | 17 + tokio-postgres/tests/types/geo_010.rs | 57 ++ tokio-postgres/tests/types/mod.rs | 612 +++++++++++++++++++++ tokio-postgres/tests/types/serde_json_1.rs | 39 ++ tokio-postgres/tests/types/uuid_07.rs | 17 + 8 files changed, 921 insertions(+) create mode 100644 tokio-postgres/tests/types/bit_vec_07.rs create mode 100644 tokio-postgres/tests/types/chrono_04.rs create mode 100644 tokio-postgres/tests/types/eui48_04.rs create mode 100644 tokio-postgres/tests/types/geo_010.rs create mode 100644 tokio-postgres/tests/types/mod.rs create mode 100644 tokio-postgres/tests/types/serde_json_1.rs create mode 100644 tokio-postgres/tests/types/uuid_07.rs diff --git a/tokio-postgres/tests/test.rs b/tokio-postgres/tests/test.rs index 06d85d648..9df119df8 100644 --- a/tokio-postgres/tests/test.rs +++ b/tokio-postgres/tests/test.rs @@ -13,6 +13,8 @@ use tokio_postgres::error::SqlState; use tokio_postgres::types::{Kind, Type}; use tokio_postgres::{AsyncMessage, Client, Connection, NoTls}; +mod types; + fn connect( builder: &tokio_postgres::Builder, ) -> impl Future), Error = tokio_postgres::Error> { diff --git a/tokio-postgres/tests/types/bit_vec_07.rs b/tokio-postgres/tests/types/bit_vec_07.rs new file mode 100644 index 000000000..3ab29ee72 --- /dev/null +++ b/tokio-postgres/tests/types/bit_vec_07.rs @@ -0,0 +1,29 @@ +use bit_vec_07::BitVec; + +use crate::types::test_type; + +#[test] +fn test_bit_params() { + let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); + bv.pop(); + bv.pop(); + test_type( + "BIT(14)", + &[(Some(bv), "B'01101001000001'"), (None, "NULL")], + ) +} + +#[test] +fn test_varbit_params() { + let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); + bv.pop(); + bv.pop(); + test_type( + "VARBIT", + &[ + (Some(bv), "B'01101001000001'"), + (Some(BitVec::from_bytes(&[])), "B''"), + (None, "NULL"), + ], + ) +} diff --git a/tokio-postgres/tests/types/chrono_04.rs b/tokio-postgres/tests/types/chrono_04.rs new file mode 100644 index 000000000..224a5487b --- /dev/null +++ b/tokio-postgres/tests/types/chrono_04.rs @@ -0,0 +1,148 @@ +use chrono_04::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc}; +use tokio_postgres::types::{Date, Timestamp}; + +use crate::types::test_type; + +#[test] +fn test_naive_date_time_params() { + fn make_check<'a>(time: &'a str) -> (Option, &'a str) { + ( + Some(NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), + time, + ) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 
00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_with_special_naive_date_time_params() { + fn make_check<'a>(time: &'a str) -> (Timestamp, &'a str) { + ( + Timestamp::Value( + NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap(), + ), + time, + ) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ); +} + +#[test] +fn test_date_time_params() { + fn make_check<'a>(time: &'a str) -> (Option>, &'a str) { + ( + Some( + Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") + .unwrap(), + ), + time, + ) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_with_special_date_time_params() { + fn make_check<'a>(time: &'a str) -> (Timestamp>, &'a str) { + ( + Timestamp::Value( + Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") + .unwrap(), + ), + time, + ) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ); +} + +#[test] +fn test_date_params() { + fn make_check<'a>(time: &'a str) -> (Option, &'a str) { + ( + Some(NaiveDate::parse_from_str(time, "'%Y-%m-%d'").unwrap()), + time, + ) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_with_special_date_params() { + fn make_check<'a>(date: &'a str) -> (Date, &'a str) { + ( + Date::Value(NaiveDate::parse_from_str(date, "'%Y-%m-%d'").unwrap()), + date, + ) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (Date::PosInfinity, "'infinity'"), + (Date::NegInfinity, "'-infinity'"), + ], + ); +} + +#[test] +fn test_time_params() { + fn make_check<'a>(time: &'a str) -> (Option, &'a str) { + ( + Some(NaiveTime::parse_from_str(time, "'%H:%M:%S.%f'").unwrap()), + time, + ) + } + test_type( + "TIME", + &[ + make_check("'00:00:00.010000000'"), + make_check("'11:19:33.100314000'"), + make_check("'23:11:45.120200000'"), + (None, "NULL"), + ], + ); +} diff --git a/tokio-postgres/tests/types/eui48_04.rs b/tokio-postgres/tests/types/eui48_04.rs new file mode 100644 index 000000000..a881e24f0 --- /dev/null +++ b/tokio-postgres/tests/types/eui48_04.rs @@ -0,0 +1,17 @@ +use eui48_04::MacAddress; + +use crate::types::test_type; + +#[test] +fn test_eui48_params() { + test_type( + "MACADDR", + &[ + ( + Some(MacAddress::parse_str("12-34-56-AB-CD-EF").unwrap()), + "'12-34-56-ab-cd-ef'", + ), + (None, "NULL"), + ], + ) +} diff --git a/tokio-postgres/tests/types/geo_010.rs b/tokio-postgres/tests/types/geo_010.rs new file mode 100644 index 000000000..a782a7362 --- /dev/null +++ b/tokio-postgres/tests/types/geo_010.rs @@ -0,0 +1,57 @@ +use geo_010::{Coordinate, LineString, Point, Rect}; + +use crate::types::test_type; + +#[test] +fn test_point_params() { + test_type( + "POINT", + &[ + 
(Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), + (Some(Point::new(-3.14, 1.618)), "POINT(-3.14, 1.618)"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_box_params() { + test_type( + "BOX", + &[ + ( + Some(Rect { + min: Coordinate { x: -3.14, y: 1.618 }, + max: Coordinate { + x: 160.0, + y: 69701.5615, + }, + }), + "BOX(POINT(160.0, 69701.5615), POINT(-3.14, 1.618))", + ), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_path_params() { + let points = vec![ + Coordinate { x: 0., y: 0. }, + Coordinate { x: -3.14, y: 1.618 }, + Coordinate { + x: 160.0, + y: 69701.5615, + }, + ]; + test_type( + "PATH", + &[ + ( + Some(LineString(points)), + "path '((0, 0), (-3.14, 1.618), (160.0, 69701.5615))'", + ), + (None, "NULL"), + ], + ); +} diff --git a/tokio-postgres/tests/types/mod.rs b/tokio-postgres/tests/types/mod.rs new file mode 100644 index 000000000..6ce526d0d --- /dev/null +++ b/tokio-postgres/tests/types/mod.rs @@ -0,0 +1,612 @@ +use futures::{Future, Stream}; +use std::collections::HashMap; +use std::error::Error; +use std::f32; +use std::f64; +use std::fmt; +use std::result; +use std::time::{Duration, UNIX_EPOCH}; +use tokio::runtime::current_thread::Runtime; +use tokio_postgres::to_sql_checked; +use tokio_postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, ToSql, Type, WrongType}; + +use crate::connect; + +#[cfg(feature = "with-bit-vec-0.7")] +mod bit_vec_07; +#[cfg(feature = "with-chrono-0.4")] +mod chrono_04; +#[cfg(feature = "with-eui48-0.4")] +mod eui48_04; +#[cfg(feature = "with-geo-0.10")] +mod geo_010; +#[cfg(feature = "with-serde_json-1")] +mod serde_json_1; +#[cfg(feature = "with-uuid-0.7")] +mod uuid_07; + +fn test_type(sql_type: &str, checks: &[(T, S)]) +where + T: PartialEq + for<'a> FromSqlOwned + ToSql, + S: fmt::Display, +{ + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + for &(ref val, ref repr) in checks.iter() { + let prepare = client.prepare(&format!("SELECT {}::{}", repr, sql_type)); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[]).collect(); + let rows = runtime.block_on(query).unwrap(); + let result = rows[0].get(0); + assert_eq!(val, &result); + + let prepare = client.prepare(&format!("SELECT $1::{}", sql_type)); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[val]).collect(); + let rows = runtime.block_on(query).unwrap(); + let result = rows[0].get(0); + assert_eq!(val, &result); + } +} + +#[test] +fn test_bool_params() { + test_type( + "BOOL", + &[(Some(true), "'t'"), (Some(false), "'f'"), (None, "NULL")], + ); +} + +#[test] +fn test_i8_params() { + test_type("\"char\"", &[(Some('a' as i8), "'a'"), (None, "NULL")]); +} + +#[test] +fn test_name_params() { + test_type( + "NAME", + &[ + (Some("hello world".to_owned()), "'hello world'"), + ( + Some("イロハニホヘト チリヌルヲ".to_owned()), + "'イロハニホヘト チリヌルヲ'", + ), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_i16_params() { + test_type( + "SMALLINT", + &[ + (Some(15001i16), "15001"), + (Some(-15001i16), "-15001"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_i32_params() { + test_type( + "INT", + &[ + (Some(2147483548i32), "2147483548"), + (Some(-2147483548i32), "-2147483548"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_oid_params() { + test_type( + "OID", + &[ + (Some(2147483548u32), 
"2147483548"), + (Some(4000000000), "4000000000"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_i64_params() { + test_type( + "BIGINT", + &[ + (Some(9223372036854775708i64), "9223372036854775708"), + (Some(-9223372036854775708i64), "-9223372036854775708"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_f32_params() { + test_type( + "REAL", + &[ + (Some(f32::INFINITY), "'infinity'"), + (Some(f32::NEG_INFINITY), "'-infinity'"), + (Some(1000.55), "1000.55"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_f64_params() { + test_type( + "DOUBLE PRECISION", + &[ + (Some(f64::INFINITY), "'infinity'"), + (Some(f64::NEG_INFINITY), "'-infinity'"), + (Some(10000.55), "10000.55"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_varchar_params() { + test_type( + "VARCHAR", + &[ + (Some("hello world".to_owned()), "'hello world'"), + ( + Some("イロハニホヘト チリヌルヲ".to_owned()), + "'イロハニホヘト チリヌルヲ'", + ), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_text_params() { + test_type( + "TEXT", + &[ + (Some("hello world".to_owned()), "'hello world'"), + ( + Some("イロハニホヘト チリヌルヲ".to_owned()), + "'イロハニホヘト チリヌルヲ'", + ), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_borrowed_text() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let prepare = client.prepare("SELECT 'foo'"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[]).collect(); + let rows = runtime.block_on(query).unwrap(); + let s: &str = rows[0].get(0); + assert_eq!(s, "foo"); +} + +#[test] +fn test_bpchar_params() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let batch = client.batch_execute( + "CREATE TEMPORARY TABLE foo ( + id SERIAL PRIMARY KEY, + b CHAR(5) + )", + ); + runtime.block_on(batch).unwrap(); + + let prepare = client.prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)"); + let stmt = runtime.block_on(prepare).unwrap(); + let execute = client.execute(&stmt, &[&"12345", &"123", &None::<&'static str>]); + runtime.block_on(execute).unwrap(); + + let prepare = client.prepare("SELECT b FROM foo ORDER BY id"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[]).collect(); + let res = runtime.block_on(query).unwrap(); + + assert_eq!( + vec![Some("12345".to_owned()), Some("123 ".to_owned()), None], + res.iter().map(|row| row.get(0)).collect::>() + ); +} + +#[test] +fn test_citext_params() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let batch = client.batch_execute( + "CREATE TEMPORARY TABLE foo ( + id SERIAL PRIMARY KEY, + b CITEXT + )", + ); + runtime.block_on(batch).unwrap(); + + let prepare = client.prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)"); + let stmt = runtime.block_on(prepare).unwrap(); + let execute = client.execute(&stmt, &[&"foobar", &"FooBar", &None::<&'static str>]); + runtime.block_on(execute).unwrap(); + + let prepare = 
client.prepare("SELECT b FROM foo WHERE b = 'FOOBAR' ORDER BY id"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[]).collect(); + let res = runtime.block_on(query).unwrap(); + + assert_eq!( + vec!["foobar".to_string(), "FooBar".to_string()], + res.iter() + .map(|row| row.get::<_, String>(0)) + .collect::>() + ); +} + +#[test] +fn test_bytea_params() { + test_type( + "BYTEA", + &[ + (Some(vec![0u8, 1, 2, 3, 254, 255]), "'\\x00010203feff'"), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_borrowed_bytea() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let prepare = client.prepare("SELECT 'foo'::BYTEA"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[]).collect(); + let rows = runtime.block_on(query).unwrap(); + let s: &[u8] = rows[0].get(0); + assert_eq!(s, b"foo"); +} + +#[test] +fn test_hstore_params() { + macro_rules! make_map { + ($($k:expr => $v:expr),+) => ({ + let mut map = HashMap::new(); + $(map.insert($k, $v);)+ + map + }) + } + test_type( + "hstore", + &[ + ( + Some(make_map!("a".to_owned() => Some("1".to_owned()))), + "'a=>1'", + ), + ( + Some(make_map!("hello".to_owned() => Some("world!".to_owned()), + "hola".to_owned() => Some("mundo!".to_owned()), + "what".to_owned() => None)), + "'hello=>world!,hola=>mundo!,what=>NULL'", + ), + (None, "NULL"), + ], + ); +} + +#[test] +fn test_array_params() { + test_type( + "integer[]", + &[ + (Some(vec![1i32, 2i32]), "ARRAY[1,2]"), + (Some(vec![1i32]), "ARRAY[1]"), + (Some(vec![]), "ARRAY[]"), + (None, "NULL"), + ], + ); +} + +fn test_nan_param(sql_type: &str) +where + T: PartialEq + ToSql + FromSqlOwned, +{ + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let prepare = client.prepare(&format!("SELECT 'NaN'::{}", sql_type)); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[]).collect(); + let rows = runtime.block_on(query).unwrap(); + let val: T = rows[0].get(0); + assert!(val != val); +} + +#[test] +fn test_f32_nan_param() { + test_nan_param::("REAL"); +} + +#[test] +fn test_f64_nan_param() { + test_nan_param::("DOUBLE PRECISION"); +} + +#[test] +fn test_pg_database_datname() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let prepare = client.prepare("SELECT datname FROM pg_database"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[]).collect(); + let rows = runtime.block_on(query).unwrap(); + assert_eq!(rows[0].get::<_, &str>(0), "postgres"); +} + +#[test] +fn test_slice() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let batch = 
client.batch_execute( + "CREATE TEMPORARY TABLE foo ( + id SERIAL PRIMARY KEY, + f TEXT + ); + + INSERT INTO foo(f) VALUES ('a'), ('b'), ('c'), ('d'); + ", + ); + runtime.block_on(batch).unwrap(); + + let prepare = client.prepare("SELECT f FROM foo WHERE id = ANY($1)"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client + .query(&stmt, &[&&[1i32, 3, 4][..]]) + .map(|r| r.get::<_, String>(0)) + .collect(); + let rows = runtime.block_on(query).unwrap(); + + assert_eq!(vec!["a".to_owned(), "c".to_owned(), "d".to_owned()], rows); +} + +#[test] +fn test_slice_wrong_type() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let batch = client.batch_execute( + "CREATE TEMPORARY TABLE foo ( + id SERIAL PRIMARY KEY + )", + ); + runtime.block_on(batch).unwrap(); + + let prepare = client.prepare("SELECT * FROM foo WHERE id = ANY($1)"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[&&[&"hi"][..]]).collect(); + let err = runtime.block_on(query).err().unwrap(); + match err.source() { + Some(e) if e.is::() => {} + _ => panic!("Unexpected error {:?}", err), + }; +} + +#[test] +fn test_slice_range() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let prepare = client.prepare("SELECT $1::INT8RANGE"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[&&[&1i64][..]]).collect(); + let err = runtime.block_on(query).err().unwrap(); + match err.source() { + Some(e) if e.is::() => {} + _ => panic!("Unexpected error {:?}", err), + }; +} + +#[test] +fn domain() { + #[derive(Debug, PartialEq)] + struct SessionId(Vec); + + impl ToSql for SessionId { + fn to_sql( + &self, + ty: &Type, + out: &mut Vec, + ) -> result::Result> { + let inner = match *ty.kind() { + Kind::Domain(ref inner) => inner, + _ => unreachable!(), + }; + self.0.to_sql(inner, out) + } + + fn accepts(ty: &Type) -> bool { + ty.name() == "session_id" + && match *ty.kind() { + Kind::Domain(_) => true, + _ => false, + } + } + + to_sql_checked!(); + } + + impl<'a> FromSql<'a> for SessionId { + fn from_sql(ty: &Type, raw: &[u8]) -> result::Result> { + Vec::::from_sql(ty, raw).map(SessionId) + } + + fn accepts(ty: &Type) -> bool { + // This is super weird! 
+ as FromSql>::accepts(ty) + } + } + + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let batch = client.batch_execute( + "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16); + CREATE TABLE pg_temp.foo (id pg_temp.session_id);", + ); + runtime.block_on(batch).unwrap(); + + let id = SessionId(b"0123456789abcdef".to_vec()); + + let prepare = client.prepare("INSERT INTO pg_temp.foo (id) VALUES ($1)"); + let stmt = runtime.block_on(prepare).unwrap(); + let execute = client.execute(&stmt, &[&id]); + runtime.block_on(execute).unwrap(); + + let prepare = client.prepare("SELECT id FROM pg_temp.foo"); + let stmt = runtime.block_on(prepare).unwrap(); + let query = client.query(&stmt, &[&id]).collect(); + let rows = runtime.block_on(query).unwrap(); + assert_eq!(id, rows[0].get(0)); +} + +#[test] +fn composite() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let batch = client.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier INTEGER, + price NUMERIC + )", + ); + runtime.block_on(batch).unwrap(); + + let prepare = client.prepare("SELECT $1::inventory_item"); + let stmt = runtime.block_on(prepare).unwrap(); + let type_ = &stmt.params()[0]; + assert_eq!(type_.name(), "inventory_item"); + match *type_.kind() { + Kind::Composite(ref fields) => { + assert_eq!(fields[0].name(), "name"); + assert_eq!(fields[0].type_(), &Type::TEXT); + assert_eq!(fields[1].name(), "supplier"); + assert_eq!(fields[1].type_(), &Type::INT4); + assert_eq!(fields[2].name(), "price"); + assert_eq!(fields[2].type_(), &Type::NUMERIC); + } + ref t => panic!("bad type {:?}", t), + } +} + +#[test] +fn enum_() { + let mut runtime = Runtime::new().unwrap(); + + let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let (mut client, connection) = runtime.block_on(handshake).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let batch = client.batch_execute("CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy');"); + runtime.block_on(batch).unwrap(); + + let prepare = client.prepare("SELECT $1::mood"); + let stmt = runtime.block_on(prepare).unwrap(); + let type_ = &stmt.params()[0]; + assert_eq!(type_.name(), "mood"); + match *type_.kind() { + Kind::Enum(ref variants) => { + assert_eq!( + variants, + &["sad".to_owned(), "ok".to_owned(), "happy".to_owned()] + ); + } + _ => panic!("bad type"), + } +} + +#[test] +fn system_time() { + test_type( + "TIMESTAMP", + &[ + ( + Some(UNIX_EPOCH + Duration::from_millis(1_010)), + "'1970-01-01 00:00:01.01'", + ), + ( + Some(UNIX_EPOCH - Duration::from_millis(1_010)), + "'1969-12-31 23:59:58.99'", + ), + ( + Some(UNIX_EPOCH + Duration::from_millis(946684800 * 1000 + 1_010)), + "'2000-01-01 00:00:01.01'", + ), + (None, "NULL"), + ], + ); +} diff --git a/tokio-postgres/tests/types/serde_json_1.rs b/tokio-postgres/tests/types/serde_json_1.rs new file mode 100644 index 000000000..37d431f6e --- /dev/null +++ b/tokio-postgres/tests/types/serde_json_1.rs @@ -0,0 +1,39 @@ +use serde_json_1::Value; + +use 
crate::types::test_type; + +#[test] +fn test_json_params() { + test_type( + "JSON", + &[ + ( + Some(serde_json_1::from_str::("[10, 11, 12]").unwrap()), + "'[10, 11, 12]'", + ), + ( + Some(serde_json_1::from_str::("{\"f\": \"asd\"}").unwrap()), + "'{\"f\": \"asd\"}'", + ), + (None, "NULL"), + ], + ) +} + +#[test] +fn test_jsonb_params() { + test_type( + "JSONB", + &[ + ( + Some(serde_json_1::from_str::("[10, 11, 12]").unwrap()), + "'[10, 11, 12]'", + ), + ( + Some(serde_json_1::from_str::("{\"f\": \"asd\"}").unwrap()), + "'{\"f\": \"asd\"}'", + ), + (None, "NULL"), + ], + ) +} diff --git a/tokio-postgres/tests/types/uuid_07.rs b/tokio-postgres/tests/types/uuid_07.rs new file mode 100644 index 000000000..1d51a8ad9 --- /dev/null +++ b/tokio-postgres/tests/types/uuid_07.rs @@ -0,0 +1,17 @@ +use uuid_07::Uuid; + +use crate::types::test_type; + +#[test] +fn test_uuid_params() { + test_type( + "UUID", + &[ + ( + Some(Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap()), + "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", + ), + (None, "NULL"), + ], + ) +} From 8c3338e37df31ac06db4ed67d6d0ca11185f1275 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 9 Dec 2018 18:00:53 -0800 Subject: [PATCH 038/819] Enforce rustfmt in CI --- .circleci/config.yml | 2 ++ tokio-postgres/src/types/chrono_04.rs | 18 +++++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 2cb94e20b..3af994496 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -28,6 +28,8 @@ jobs: - image: sfackler/rust-postgres-test:5 steps: - checkout + - run: rustup component add rustfmt + - run: cargo fmt --all -- --check - *RESTORE_REGISTRY - run: cargo generate-lockfile - *SAVE_REGISTRY diff --git a/tokio-postgres/src/types/chrono_04.rs b/tokio-postgres/src/types/chrono_04.rs index 212ba6ee0..91ec6a801 100644 --- a/tokio-postgres/src/types/chrono_04.rs +++ b/tokio-postgres/src/types/chrono_04.rs @@ -41,7 +41,11 @@ impl<'a> FromSql<'a> for DateTime { } impl ToSql for DateTime { - fn to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { + fn to_sql( + &self, + type_: &Type, + w: &mut Vec, + ) -> Result> { self.naive_utc().to_sql(type_, w) } @@ -59,7 +63,11 @@ impl<'a> FromSql<'a> for DateTime { } impl ToSql for DateTime { - fn to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { + fn to_sql( + &self, + type_: &Type, + w: &mut Vec, + ) -> Result> { self.with_timezone(&Utc).to_sql(type_, w) } @@ -80,7 +88,11 @@ impl<'a> FromSql<'a> for DateTime { } impl ToSql for DateTime { - fn to_sql(&self, type_: &Type, w: &mut Vec) -> Result> { + fn to_sql( + &self, + type_: &Type, + w: &mut Vec, + ) -> Result> { self.with_timezone(&Utc).to_sql(type_, w) } From 04ce4bb7a1012339580442f7fd3a5e948a4053c3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 9 Dec 2018 21:23:31 -0800 Subject: [PATCH 039/819] Run clippy --- .circleci/config.yml | 3 +- codegen/src/main.rs | 2 + codegen/src/sqlstate.rs | 7 +- codegen/src/type_gen.rs | 9 +- postgres-protocol/src/authentication/sasl.rs | 18 +- postgres-protocol/src/lib.rs | 2 +- postgres-protocol/src/message/backend.rs | 89 ++- postgres-protocol/src/message/frontend.rs | 40 +- postgres-protocol/src/types.rs | 44 +- tokio-postgres-native-tls/src/lib.rs | 7 +- tokio-postgres-openssl/src/lib.rs | 2 +- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/builder.rs | 6 + tokio-postgres/src/error/mod.rs | 33 +- tokio-postgres/src/error/sqlstate.rs | 566 +++++++++--------- tokio-postgres/src/lib.rs | 7 +- 
tokio-postgres/src/proto/client.rs | 4 +- tokio-postgres/src/proto/connection.rs | 2 +- tokio-postgres/src/proto/prepare.rs | 2 + tokio-postgres/src/proto/query.rs | 2 +- tokio-postgres/src/proto/row.rs | 4 +- tokio-postgres/src/proto/transaction.rs | 4 +- tokio-postgres/src/proto/typeinfo.rs | 4 +- .../src/proto/typeinfo_composite.rs | 2 +- tokio-postgres/src/proto/typeinfo_enum.rs | 4 +- tokio-postgres/src/rows.rs | 19 +- tokio-postgres/src/stmt.rs | 5 +- tokio-postgres/src/types/mod.rs | 30 +- tokio-postgres/src/types/type_gen.rs | 493 +++++---------- 29 files changed, 622 insertions(+), 790 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3af994496..0f23fddce 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -28,8 +28,9 @@ jobs: - image: sfackler/rust-postgres-test:5 steps: - checkout - - run: rustup component add rustfmt + - run: rustup component add rustfmt clippy - run: cargo fmt --all -- --check + - run: cargo clippy --all - *RESTORE_REGISTRY - run: cargo generate-lockfile - *SAVE_REGISTRY diff --git a/codegen/src/main.rs b/codegen/src/main.rs index 70e734c7f..e6559aca1 100644 --- a/codegen/src/main.rs +++ b/codegen/src/main.rs @@ -1,3 +1,5 @@ +#![warn(clippy::all)] + extern crate linked_hash_map; extern crate marksman_escape; extern crate phf_codegen; diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index a84d523ab..c940f0167 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -4,7 +4,7 @@ use std::fs::File; use std::io::{BufWriter, Write}; use std::path::Path; -const ERRCODES_TXT: &'static str = include_str!("errcodes.txt"); +const ERRCODES_TXT: &str = include_str!("errcodes.txt"); pub fn build(path: &Path) { let mut file = BufWriter::new(File::create(path.join("error/sqlstate.rs")).unwrap()); @@ -20,7 +20,7 @@ fn parse_codes() -> LinkedHashMap> { let mut codes = LinkedHashMap::new(); for line in ERRCODES_TXT.lines() { - if line.starts_with("#") || line.starts_with("Section") || line.trim().is_empty() { + if line.starts_with('#') || line.starts_with("Section") || line.trim().is_empty() { continue; } @@ -39,7 +39,6 @@ fn make_type(file: &mut BufWriter) { write!( file, "// Autogenerated file - DO NOT EDIT -use phf; use std::borrow::Cow; /// A SQLSTATE error code @@ -96,5 +95,5 @@ static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = " builder.entry(&**code, &format!("SqlState::{}", &names[0])); } builder.build(file).unwrap(); - write!(file, ";\n").unwrap(); + writeln!(file, ";").unwrap(); } diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 5fe0370ac..18be4bfc5 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -7,8 +7,8 @@ use std::path::Path; use crate::snake_to_camel; -const PG_TYPE_H: &'static str = include_str!("pg_type.h"); -const PG_RANGE_H: &'static str = include_str!("pg_range.h"); +const PG_TYPE_H: &str = include_str!("pg_type.h"); +const PG_RANGE_H: &str = include_str!("pg_range.h"); struct Type { name: &'static str, @@ -122,7 +122,7 @@ fn make_header(w: &mut BufWriter) { "// Autogenerated file - DO NOT EDIT use std::sync::Arc; -use types::{{Type, Oid, Kind}}; +use crate::types::{{Type, Oid, Kind}}; #[derive(PartialEq, Eq, Debug)] pub struct Other {{ @@ -231,8 +231,7 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { write!( w, " Inner::{} => {{ - const V: &'static Kind = &Kind::{}; - V + &Kind::{} }} ", type_.variant, kind diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index 8488fa3c8..4e34a8c87 
100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -14,9 +14,9 @@ use std::str; const NONCE_LENGTH: usize = 24; /// The identifier of the SCRAM-SHA-256 SASL authentication mechanism. -pub const SCRAM_SHA_256: &'static str = "SCRAM-SHA-256"; +pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256"; /// The identifier of the SCRAM-SHA-256-PLUS SASL authentication mechanism. -pub const SCRAM_SHA_256_PLUS: &'static str = "SCRAM-SHA-256-PLUS"; +pub const SCRAM_SHA_256_PLUS: &str = "SCRAM-SHA-256-PLUS"; // since postgres passwords are not required to exclude saslprep-prohibited // characters or even be valid UTF8, we run saslprep if possible and otherwise @@ -153,7 +153,7 @@ impl ScramSha256 { state: State::Update { nonce, password: normalize(password), - channel_binding: channel_binding, + channel_binding, }, } } @@ -228,8 +228,8 @@ impl ScramSha256 { write!(&mut self.message, ",p={}", base64::encode(&*client_proof)).unwrap(); self.state = State::Finish { - salted_password: salted_password, - auth_message: auth_message, + salted_password, + auth_message, }; Ok(()) } @@ -288,7 +288,7 @@ struct Parser<'a> { impl<'a> Parser<'a> { fn new(s: &'a str) -> Parser<'a> { Parser { - s: s, + s, it: s.char_indices().peekable(), } } @@ -390,9 +390,9 @@ impl<'a> Parser<'a> { self.eof()?; Ok(ServerFirstMessage { - nonce: nonce, - salt: salt, - iteration_count: iteration_count, + nonce, + salt, + iteration_count, }) } diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index aa815c361..4cdefcd20 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -10,7 +10,7 @@ //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. 
#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.3")] -#![warn(missing_docs, rust_2018_idioms)] +#![warn(missing_docs, rust_2018_idioms, clippy::all)] use byteorder::{BigEndian, ByteOrder}; use std::io; diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index c11516d13..f9c37b590 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -88,40 +88,37 @@ impl Message { let channel = buf.read_cstr()?; let message = buf.read_cstr()?; Message::NotificationResponse(NotificationResponseBody { - process_id: process_id, - channel: channel, - message: message, + process_id, + channel, + message, }) } b'c' => Message::CopyDone, b'C' => { let tag = buf.read_cstr()?; - Message::CommandComplete(CommandCompleteBody { tag: tag }) + Message::CommandComplete(CommandCompleteBody { tag }) } b'd' => { let storage = buf.read_all(); - Message::CopyData(CopyDataBody { storage: storage }) + Message::CopyData(CopyDataBody { storage }) } b'D' => { let len = buf.read_u16::()?; let storage = buf.read_all(); - Message::DataRow(DataRowBody { - storage: storage, - len: len, - }) + Message::DataRow(DataRowBody { storage, len }) } b'E' => { let storage = buf.read_all(); - Message::ErrorResponse(ErrorResponseBody { storage: storage }) + Message::ErrorResponse(ErrorResponseBody { storage }) } b'G' => { let format = buf.read_u8()?; let len = buf.read_u16::()?; let storage = buf.read_all(); Message::CopyInResponse(CopyInResponseBody { - format: format, - len: len, - storage: storage, + format, + len, + storage, }) } b'H' => { @@ -129,9 +126,9 @@ impl Message { let len = buf.read_u16::()?; let storage = buf.read_all(); Message::CopyOutResponse(CopyOutResponseBody { - format: format, - len: len, - storage: storage, + format, + len, + storage, }) } b'I' => Message::EmptyQueryResponse, @@ -139,14 +136,14 @@ impl Message { let process_id = buf.read_i32::()?; let secret_key = buf.read_i32::()?; Message::BackendKeyData(BackendKeyDataBody { - process_id: process_id, - secret_key: secret_key, + process_id, + secret_key, }) } b'n' => Message::NoData, b'N' => { let storage = buf.read_all(); - Message::NoticeResponse(NoticeResponseBody { storage: storage }) + Message::NoticeResponse(NoticeResponseBody { storage }) } b'R' => match buf.read_i32::()? 
{ 0 => Message::AuthenticationOk, @@ -155,7 +152,7 @@ impl Message { 5 => { let mut salt = [0; 4]; buf.read_exact(&mut salt)?; - Message::AuthenticationMd5Password(AuthenticationMd5PasswordBody { salt: salt }) + Message::AuthenticationMd5Password(AuthenticationMd5PasswordBody { salt }) } 6 => Message::AuthenticationScmCredential, 7 => Message::AuthenticationGss, @@ -187,30 +184,21 @@ impl Message { b'S' => { let name = buf.read_cstr()?; let value = buf.read_cstr()?; - Message::ParameterStatus(ParameterStatusBody { - name: name, - value: value, - }) + Message::ParameterStatus(ParameterStatusBody { name, value }) } b't' => { let len = buf.read_u16::()?; let storage = buf.read_all(); - Message::ParameterDescription(ParameterDescriptionBody { - storage: storage, - len: len, - }) + Message::ParameterDescription(ParameterDescriptionBody { storage, len }) } b'T' => { let len = buf.read_u16::()?; let storage = buf.read_all(); - Message::RowDescription(RowDescriptionBody { - storage: storage, - len: len, - }) + Message::RowDescription(RowDescriptionBody { storage, len }) } b'Z' => { let status = buf.read_u8()?; - Message::ReadyForQuery(ReadyForQueryBody { status: status }) + Message::ReadyForQuery(ReadyForQueryBody { status }) } tag => { return Err(io::Error::new( @@ -305,7 +293,7 @@ pub struct AuthenticationSaslBody(Bytes); impl AuthenticationSaslBody { #[inline] - pub fn mechanisms<'a>(&'a self) -> SaslMechanisms<'a> { + pub fn mechanisms(&self) -> SaslMechanisms<'_> { SaslMechanisms(&self.0) } } @@ -410,7 +398,7 @@ impl CopyInResponseBody { } #[inline] - pub fn column_formats<'a>(&'a self) -> ColumnFormats<'a> { + pub fn column_formats(&self) -> ColumnFormats<'_> { ColumnFormats { remaining: self.len, buf: &self.storage, @@ -464,7 +452,7 @@ impl CopyOutResponseBody { } #[inline] - pub fn column_formats<'a>(&'a self) -> ColumnFormats<'a> { + pub fn column_formats(&self) -> ColumnFormats<'_> { ColumnFormats { remaining: self.len, buf: &self.storage, @@ -479,7 +467,7 @@ pub struct DataRowBody { impl DataRowBody { #[inline] - pub fn ranges<'a>(&'a self) -> DataRowRanges<'a> { + pub fn ranges(&self) -> DataRowRanges<'_> { DataRowRanges { buf: &self.storage, len: self.storage.len(), @@ -547,7 +535,7 @@ pub struct ErrorResponseBody { impl ErrorResponseBody { #[inline] - pub fn fields<'a>(&'a self) -> ErrorFields<'a> { + pub fn fields(&self) -> ErrorFields<'_> { ErrorFields { buf: &self.storage } } } @@ -578,10 +566,7 @@ impl<'a> FallibleIterator for ErrorFields<'a> { let value = get_str(&self.buf[..value_end])?; self.buf = &self.buf[value_end + 1..]; - Ok(Some(ErrorField { - type_: type_, - value: value, - })) + Ok(Some(ErrorField { type_, value })) } } @@ -608,7 +593,7 @@ pub struct NoticeResponseBody { impl NoticeResponseBody { #[inline] - pub fn fields<'a>(&'a self) -> ErrorFields<'a> { + pub fn fields(&self) -> ErrorFields<'_> { ErrorFields { buf: &self.storage } } } @@ -643,7 +628,7 @@ pub struct ParameterDescriptionBody { impl ParameterDescriptionBody { #[inline] - pub fn parameters<'a>(&'a self) -> Parameters<'a> { + pub fn parameters(&self) -> Parameters<'_> { Parameters { buf: &self.storage, remaining: self.len, @@ -719,7 +704,7 @@ pub struct RowDescriptionBody { impl RowDescriptionBody { #[inline] - pub fn fields<'a>(&'a self) -> Fields<'a> { + pub fn fields(&self) -> Fields<'_> { Fields { buf: &self.storage, remaining: self.len, @@ -761,13 +746,13 @@ impl<'a> FallibleIterator for Fields<'a> { let format = self.buf.read_i16::()?; Ok(Some(Field { - name: name, - table_oid: table_oid, - 
column_id: column_id, - type_oid: type_oid, - type_size: type_size, - type_modifier: type_modifier, - format: format, + name, + table_oid, + column_id, + type_oid, + type_size, + type_modifier, + format, })) } } diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index edb929ef9..35bc86be5 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -102,10 +102,16 @@ impl<'a> Message<'a> { Message::CancelRequest { process_id, secret_key, - } => Ok(cancel_request(process_id, secret_key, buf)), + } => { + cancel_request(process_id, secret_key, buf); + Ok(()) + } Message::Close { variant, name } => close(variant, name, buf), Message::CopyData { data } => copy_data(data, buf), - Message::CopyDone => Ok(copy_done(buf)), + Message::CopyDone => { + copy_done(buf); + Ok(()) + } Message::CopyFail { message } => copy_fail(message, buf), Message::Describe { variant, name } => describe(variant, name, buf), Message::Execute { portal, max_rows } => execute(portal, max_rows, buf), @@ -120,12 +126,21 @@ impl<'a> Message<'a> { sasl_initial_response(mechanism, data, buf) } Message::SaslResponse { data } => sasl_response(data, buf), - Message::SslRequest => Ok(ssl_request(buf)), + Message::SslRequest => { + ssl_request(buf); + Ok(()) + } Message::StartupMessage { parameters } => { startup_message(parameters.iter().map(|&(ref k, ref v)| (&**k, &**v)), buf) } - Message::Sync => Ok(sync(buf)), - Message::Terminate => Ok(terminate(buf)), + Message::Sync => { + sync(buf); + Ok(()) + } + Message::Terminate => { + terminate(buf); + Ok(()) + } Message::__ForExtensibility => unreachable!(), } } @@ -222,7 +237,7 @@ where #[inline] pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut Vec) { write_body(buf, |buf| { - buf.write_i32::(80877102).unwrap(); + buf.write_i32::(80_877_102).unwrap(); buf.write_i32::(process_id).unwrap(); buf.write_i32::(secret_key) }) @@ -320,12 +335,15 @@ pub fn sasl_initial_response(mechanism: &str, data: &[u8], buf: &mut Vec) -> #[inline] pub fn sasl_response(data: &[u8], buf: &mut Vec) -> io::Result<()> { buf.push(b'p'); - write_body(buf, |buf| Ok(buf.extend_from_slice(data))) + write_body(buf, |buf| { + buf.extend_from_slice(data); + Ok(()) + }) } #[inline] pub fn ssl_request(buf: &mut Vec) { - write_body(buf, |buf| buf.write_i32::(80877103)).unwrap(); + write_body(buf, |buf| buf.write_i32::(80_877_103)).unwrap(); } #[inline] @@ -334,10 +352,10 @@ where I: IntoIterator, { write_body(buf, |buf| { - buf.write_i32::(196608).unwrap(); + buf.write_i32::(196_608).unwrap(); for (key, value) in parameters { - buf.write_cstr(key.as_ref())?; - buf.write_cstr(value.as_ref())?; + buf.write_cstr(key)?; + buf.write_cstr(value)?; } buf.push(0); Ok(()) diff --git a/postgres-protocol/src/types.rs b/postgres-protocol/src/types.rs index 7a764a9d5..7fac2b9d8 100644 --- a/postgres-protocol/src/types.rs +++ b/postgres-protocol/src/types.rs @@ -216,7 +216,7 @@ pub fn hstore_from_sql<'a>( Ok(HstoreEntries { remaining: count, - buf: buf, + buf, }) } @@ -231,6 +231,7 @@ impl<'a> FallibleIterator for HstoreEntries<'a> { type Error = StdBox; #[inline] + #[allow(clippy::type_complexity)] fn next( &mut self, ) -> Result)>, StdBox> { @@ -324,6 +325,12 @@ impl<'a> Varbit<'a> { self.len } + /// Determines if the value has no bits. + #[inline] + pub fn is_empty(&self) -> bool { + self.len == 0 + } + /// Returns the bits as a slice of bytes. 
#[inline] pub fn bytes(&self) -> &'a [u8] { @@ -503,11 +510,11 @@ pub fn array_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox FallibleIterator for ArrayDimensions<'a> { let len = self.0.read_i32::()?; let lower_bound = self.0.read_i32::()?; - Ok(Some(ArrayDimension { - len: len, - lower_bound: lower_bound, - })) + Ok(Some(ArrayDimension { len, lower_bound })) } #[inline] @@ -783,7 +787,7 @@ pub fn point_from_sql(mut buf: &[u8]) -> Result(mut buf: &'a [u8]) -> Result, StdBox()?; Ok(Path { - closed: closed, - points: points, - buf: buf, + closed, + points, + buf, }) } @@ -940,7 +944,7 @@ impl<'a> FallibleIterator for PathPoints<'a> { let x = self.buf.read_f64::()?; let y = self.buf.read_f64::()?; - Ok(Some(Point { x: x, y: y })) + Ok(Some(Point { x, y })) } #[inline] @@ -979,18 +983,19 @@ mod test { #[test] fn int4() { let mut buf = vec![]; - int4_to_sql(0x01020304, &mut buf); - assert_eq!(int4_from_sql(&buf).unwrap(), 0x01020304); + int4_to_sql(0x0102_0304, &mut buf); + assert_eq!(int4_from_sql(&buf).unwrap(), 0x0102_0304); } #[test] fn int8() { let mut buf = vec![]; - int8_to_sql(0x0102030405060708, &mut buf); - assert_eq!(int8_from_sql(&buf).unwrap(), 0x0102030405060708); + int8_to_sql(0x0102_0304_0506_0708, &mut buf); + assert_eq!(int8_from_sql(&buf).unwrap(), 0x0102_0304_0506_0708); } #[test] + #[allow(clippy::float_cmp)] fn float4() { let mut buf = vec![]; float4_to_sql(10343.95, &mut buf); @@ -998,6 +1003,7 @@ mod test { } #[test] + #[allow(clippy::float_cmp)] fn float8() { let mut buf = vec![]; float8_to_sql(10343.95, &mut buf); diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index 549234670..db4ecac62 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -1,4 +1,4 @@ -#![warn(rust_2018_idioms)] +#![warn(rust_2018_idioms, clippy::all)] use futures::{try_ready, Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -14,11 +14,6 @@ pub struct TlsConnector { } impl TlsConnector { - pub fn new(domain: &str) -> Result { - let connector = native_tls::TlsConnector::new()?; - Ok(TlsConnector::with_connector(connector, domain)) - } - pub fn with_connector(connector: native_tls::TlsConnector, domain: &str) -> TlsConnector { TlsConnector { connector: tokio_tls::TlsConnector::from(connector), diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index 58aabba56..08c482ef6 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -1,4 +1,4 @@ -#![warn(rust_2018_idioms)] +#![warn(rust_2018_idioms, clippy::all)] use futures::{try_ready, Async, Future, Poll}; use openssl::hash::MessageDigest; diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 00a09630e..398580ff7 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -34,7 +34,7 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] [dependencies] antidote = "1.0" bytes = "0.4" -fallible-iterator = "0.1.3" +fallible-iterator = "0.1.6" futures = "0.1.7" futures-cpupool = "0.1" log = "0.4" diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index 8317522a3..de3ec2648 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -10,6 +10,12 @@ pub struct Builder { password: Option, } +impl Default for Builder { + fn default() -> Builder { + Builder::new() + } +} + impl Builder { pub fn new() -> Builder { let mut params = HashMap::new(); diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 
20bb95e52..1ff20c37f 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -8,6 +8,7 @@ use std::io; pub use self::sqlstate::*; +#[allow(clippy::unreadable_literal)] mod sqlstate; /// The severity of a Postgres error or notice. @@ -85,7 +86,7 @@ pub struct DbError { } impl DbError { - pub(crate) fn new(fields: &mut ErrorFields<'_>) -> io::Result { + pub(crate) fn parse(fields: &mut ErrorFields<'_>) -> io::Result { let mut severity = None; let mut parsed_severity = None; let mut code = None; @@ -160,18 +161,18 @@ impl DbError { Ok(DbError { severity: severity .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`S` field missing"))?, - parsed_severity: parsed_severity, + parsed_severity, code: code .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`C` field missing"))?, message: message .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "`M` field missing"))?, - detail: detail, - hint: hint, + detail, + hint, position: match normal_position { Some(position) => Some(ErrorPosition::Original(position)), None => match internal_position { Some(position) => Some(ErrorPosition::Internal { - position: position, + position, query: internal_query.ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, @@ -182,15 +183,15 @@ impl DbError { None => None, }, }, - where_: where_, - schema: schema, - table: table, - column: column, - datatype: datatype, - constraint: constraint, - file: file, - line: line, - routine: routine, + where_, + schema, + table, + column, + datatype, + constraint, + file, + line, + routine, }) } @@ -423,8 +424,9 @@ impl Error { Error::new(Kind::UnexpectedMessage, None) } + #[allow(clippy::needless_pass_by_value)] pub(crate) fn db(error: ErrorResponseBody) -> Error { - match DbError::new(&mut error.fields()) { + match DbError::parse(&mut error.fields()) { Ok(e) => Error::new(Kind::Db, Some(Box::new(e))), Err(e) => Error::new(Kind::Parse, Some(Box::new(e))), } @@ -438,6 +440,7 @@ impl Error { Error::new(Kind::Encode, Some(Box::new(e))) } + #[allow(clippy::wrong_self_convention)] pub(crate) fn to_sql(e: Box) -> Error { Error::new(Kind::ToSql, Some(e)) } diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 7dddfc7e2..aef7b7967 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -1,5 +1,4 @@ // Autogenerated file - DO NOT EDIT -use phf; use std::borrow::Cow; /// A SQLSTATE error code @@ -33,8 +32,7 @@ impl SqlState { pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Cow::Borrowed("01008")); /// 01003 - pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = - SqlState(Cow::Borrowed("01003")); + pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = SqlState(Cow::Borrowed("01003")); /// 01007 pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Cow::Borrowed("01007")); @@ -52,8 +50,7 @@ impl SqlState { pub const NO_DATA: SqlState = SqlState(Cow::Borrowed("02000")); /// 02001 - pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = - SqlState(Cow::Borrowed("02001")); + pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Cow::Borrowed("02001")); /// 03000 pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Cow::Borrowed("03000")); @@ -68,12 +65,10 @@ impl SqlState { pub const CONNECTION_FAILURE: SqlState = SqlState(Cow::Borrowed("08006")); /// 08001 - pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = - SqlState(Cow::Borrowed("08001")); + 
pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = SqlState(Cow::Borrowed("08001")); /// 08004 - pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = - SqlState(Cow::Borrowed("08004")); + pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = SqlState(Cow::Borrowed("08004")); /// 08007 pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("08007")); @@ -109,8 +104,7 @@ impl SqlState { pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0Z000")); /// 0Z002 - pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = - SqlState(Cow::Borrowed("0Z002")); + pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = SqlState(Cow::Borrowed("0Z002")); /// 20000 pub const CASE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("20000")); @@ -164,8 +158,7 @@ impl SqlState { pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201F")); /// 2201G - pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = - SqlState(Cow::Borrowed("2201G")); + pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201G")); /// 22018 pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Cow::Borrowed("22018")); @@ -198,8 +191,7 @@ impl SqlState { pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201W")); /// 2201X - pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = - SqlState(Cow::Borrowed("2201X")); + pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201X")); /// 2202H pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = SqlState(Cow::Borrowed("2202H")); @@ -310,27 +302,22 @@ impl SqlState { pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Cow::Borrowed("25002")); /// 25008 - pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = - SqlState(Cow::Borrowed("25008")); + pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = SqlState(Cow::Borrowed("25008")); /// 25003 - pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25003")); + pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25003")); /// 25004 - pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25004")); + pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25004")); /// 25005 - pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25005")); + pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25005")); /// 25006 pub const READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25006")); /// 25007 - pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = - SqlState(Cow::Borrowed("25007")); + pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = SqlState(Cow::Borrowed("25007")); /// 25P01 pub const NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P01")); @@ -357,8 +344,7 @@ impl SqlState { pub const INVALID_PASSWORD: SqlState = SqlState(Cow::Borrowed("28P01")); /// 2B000 - pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = - SqlState(Cow::Borrowed("2B000")); + pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2B000")); /// 2BP01 pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = 
SqlState(Cow::Borrowed("2BP01")); @@ -370,8 +356,7 @@ impl SqlState { pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("2F000")); /// 2F005 - pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = - SqlState(Cow::Borrowed("2F005")); + pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = SqlState(Cow::Borrowed("2F005")); /// 2F002 pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F002")); @@ -713,8 +698,7 @@ impl SqlState { pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Cow::Borrowed("HV00D")); /// HV090 - pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = - SqlState(Cow::Borrowed("HV090")); + pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = SqlState(Cow::Borrowed("HV090")); /// HV00A pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Cow::Borrowed("HV00A")); @@ -778,299 +762,299 @@ impl SqlState { } #[cfg_attr(rustfmt, rustfmt_skip)] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { - key: 1897749892740154578, + key: 6246114685207409605, disps: ::phf::Slice::Static(&[ - (1, 99), - (0, 0), - (1, 5), - (0, 3), - (0, 110), - (0, 54), - (0, 3), - (0, 13), - (0, 0), - (0, 24), - (0, 214), - (0, 52), - (1, 34), - (0, 33), - (0, 44), - (0, 130), - (0, 16), - (0, 187), - (0, 3), - (13, 168), - (0, 4), - (0, 19), + (1, 77), + (0, 21), + (0, 6), (0, 13), - (0, 87), - (0, 0), - (0, 108), - (0, 123), - (7, 181), - (0, 109), - (0, 32), - (0, 0), - (1, 69), - (1, 81), - (1, 219), - (0, 157), - (2, 41), - (8, 141), + (0, 28), + (0, 30), + (0, 1), + (0, 47), + (0, 5), (0, 5), + (0, 52), + (0, 96), + (0, 66), + (3, 167), + (0, 10), + (0, 56), + (2, 159), + (0, 28), + (0, 11), + (0, 6), (0, 0), + (1, 188), + (7, 122), + (2, 32), (1, 6), - (0, 3), - (1, 146), - (1, 227), - (9, 94), - (10, 158), - (29, 65), - (3, 2), - (0, 33), - (1, 94), + (1, 142), + (1, 0), + (1, 71), + (2, 35), + (0, 1), + (0, 68), + (0, 66), + (5, 43), + (0, 0), + (1, 0), + (2, 123), + (1, 1), + (0, 31), + (3, 7), + (3, 172), + (0, 35), + (0, 107), + (0, 106), + (0, 102), + (17, 207), + (6, 8), + (0, 105), + (26, 87), + (0, 21), ]), entries: ::phf::Slice::Static(&[ - ("23001", SqlState::RESTRICT_VIOLATION), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("P0000", SqlState::PLPGSQL_ERROR), - ("58000", SqlState::SYSTEM_ERROR), - ("57P01", SqlState::ADMIN_SHUTDOWN), - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("2202E", SqlState::ARRAY_ELEMENT_ERROR), - ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("42P16", SqlState::INVALID_TABLE_DEFINITION), + ("428C9", SqlState::GENERATED_ALWAYS), + ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("42622", SqlState::NAME_TOO_LONG), + ("42804", SqlState::DATATYPE_MISMATCH), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("22000", SqlState::DATA_EXCEPTION), + ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("42704", SqlState::UNDEFINED_OBJECT), + ("42703", SqlState::UNDEFINED_COLUMN), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("23505", SqlState::UNIQUE_VIOLATION), + ("42712", SqlState::DUPLICATE_ALIAS), + ("22008", SqlState::DATETIME_FIELD_OVERFLOW), + ("HV024", 
SqlState::FDW_INVALID_ATTRIBUTE_VALUE), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), ("20000", SqlState::CASE_NOT_FOUND), - ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("08006", SqlState::CONNECTION_FAILURE), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), - ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("2200N", SqlState::INVALID_XML_CONTENT), - ("57014", SqlState::QUERY_CANCELED), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), - ("01000", SqlState::WARNING), - ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("42846", SqlState::CANNOT_COERCE), - ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), - ("XX000", SqlState::INTERNAL_ERROR), - ("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("42939", SqlState::RESERVED_NAME), + ("42P12", SqlState::INVALID_DATABASE_DEFINITION), + ("42P20", SqlState::WINDOWING_ERROR), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("42P09", SqlState::AMBIGUOUS_ALIAS), + ("HV00B", SqlState::FDW_INVALID_HANDLE), ("54011", SqlState::TOO_MANY_COLUMNS), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), + ("42725", SqlState::AMBIGUOUS_FUNCTION), + ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), + ("42701", SqlState::DUPLICATE_COLUMN), ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("42704", SqlState::UNDEFINED_OBJECT), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("0L000", SqlState::INVALID_GRANTOR), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("42939", SqlState::RESERVED_NAME), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), - ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("42P19", SqlState::INVALID_RECURSION), + ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("58P02", SqlState::DUPLICATE_FILE), - ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("22022", SqlState::INDICATOR_OVERFLOW), + ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), + ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), + ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), + ("22019", SqlState::INVALID_ESCAPE_CHARACTER), 
+ ("42P19", SqlState::INVALID_RECURSION), + ("XX000", SqlState::INTERNAL_ERROR), + ("HV00K", SqlState::FDW_REPLY_HANDLE), + ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), + ("24000", SqlState::INVALID_CURSOR_STATE), + ("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("HV014", SqlState::FDW_TOO_MANY_HANDLES), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), + ("57P04", SqlState::DATABASE_DROPPED), + ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("22011", SqlState::SUBSTRING_ERROR), + ("42P21", SqlState::COLLATION_MISMATCH), ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("P0002", SqlState::NO_DATA_FOUND), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("22027", SqlState::TRIM_ERROR), - ("54001", SqlState::STATEMENT_TOO_COMPLEX), - ("42602", SqlState::INVALID_NAME), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("22000", SqlState::DATA_EXCEPTION), - ("28P01", SqlState::INVALID_PASSWORD), - ("23514", SqlState::CHECK_VIOLATION), - ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), - ("57P02", SqlState::CRASH_SHUTDOWN), + ("23502", SqlState::NOT_NULL_VIOLATION), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), + ("0L000", SqlState::INVALID_GRANTOR), + ("42809", SqlState::WRONG_OBJECT_TYPE), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), ("42P03", SqlState::DUPLICATE_CURSOR), + ("P0004", SqlState::ASSERT_FAILURE), + ("P0003", SqlState::TOO_MANY_ROWS), + ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), + ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), + ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("40000", SqlState::TRANSACTION_ROLLBACK), + ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("55P03", SqlState::LOCK_NOT_AVAILABLE), + ("57000", SqlState::OPERATOR_INTERVENTION), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), + ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), + ("42710", SqlState::DUPLICATE_OBJECT), + ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), + ("54023", SqlState::TOO_MANY_ARGUMENTS), + ("72000", SqlState::SNAPSHOT_TOO_OLD), ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("42701", SqlState::DUPLICATE_COLUMN), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), - ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("P0001", SqlState::RAISE_EXCEPTION), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), + ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("58000", SqlState::SYSTEM_ERROR), + ("42P07", 
SqlState::DUPLICATE_TABLE), + ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), + ("42P22", SqlState::INDETERMINATE_COLLATION), + ("54001", SqlState::STATEMENT_TOO_COMPLEX), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), + ("22005", SqlState::ERROR_IN_ASSIGNMENT), + ("42P02", SqlState::UNDEFINED_PARAMETER), + ("42P08", SqlState::AMBIGUOUS_PARAMETER), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), ("08P01", SqlState::PROTOCOL_VIOLATION), - ("42723", SqlState::DUPLICATE_FUNCTION), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), - ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("42712", SqlState::DUPLICATE_ALIAS), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("XX002", SqlState::INDEX_CORRUPTED), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("22012", SqlState::DIVISION_BY_ZERO), - ("XX001", SqlState::DATA_CORRUPTED), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), ("42P01", SqlState::UNDEFINED_TABLE), - ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), + ("P0002", SqlState::NO_DATA_FOUND), + ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), + ("F0001", SqlState::LOCK_FILE_EXISTS), + ("2200D", SqlState::INVALID_ESCAPE_OCTET), + ("22P04", SqlState::BAD_COPY_FILE_FORMAT), + ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), + ("P0000", SqlState::PLPGSQL_ERROR), ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("P0004", SqlState::ASSERT_FAILURE), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), - ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), - ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("F0000", SqlState::CONFIG_FILE_ERROR), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("2200S", SqlState::INVALID_XML_COMMENT), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("42703", SqlState::UNDEFINED_COLUMN), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), - ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("22023", SqlState::INVALID_PARAMETER_VALUE), - ("22011", SqlState::SUBSTRING_ERROR), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("42803", SqlState::GROUPING_ERROR), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("42P16", SqlState::INVALID_TABLE_DEFINITION), + ("3B000", SqlState::SAVEPOINT_EXCEPTION), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), + ("XX002", SqlState::INDEX_CORRUPTED), + ("39P02", 
SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("42601", SqlState::SYNTAX_ERROR), - ("42622", SqlState::NAME_TOO_LONG), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("42P21", SqlState::COLLATION_MISMATCH), - ("23505", SqlState::UNIQUE_VIOLATION), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), - ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("58P01", SqlState::UNDEFINED_FILE), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("23P01", SqlState::EXCLUSION_VIOLATION), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), + ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), - ("58030", SqlState::IO_ERROR), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("42710", SqlState::DUPLICATE_OBJECT), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("42P18", SqlState::INDETERMINATE_DATATYPE), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - ("42804", SqlState::DATATYPE_MISMATCH), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("42P22", SqlState::INDETERMINATE_COLLATION), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("42P07", SqlState::DUPLICATE_TABLE), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("23502", SqlState::NOT_NULL_VIOLATION), - ("57000", SqlState::OPERATOR_INTERVENTION), - ("HV000", SqlState::FDW_ERROR), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), + ("2202E", SqlState::ARRAY_ELEMENT_ERROR), + ("22027", SqlState::TRIM_ERROR), ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), - ("P0003", SqlState::TOO_MANY_ROWS), - ("3D000", SqlState::INVALID_CATALOG_NAME), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("55006", SqlState::OBJECT_IN_USE), + ("23514", SqlState::CHECK_VIOLATION), + ("42702", SqlState::AMBIGUOUS_COLUMN), + ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), + ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("57P01", SqlState::ADMIN_SHUTDOWN), + ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("2200N", SqlState::INVALID_XML_CONTENT), + ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), + ("58P02", SqlState::DUPLICATE_FILE), + ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("01003", 
SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("34000", SqlState::INVALID_CURSOR_NAME), + ("F0000", SqlState::CONFIG_FILE_ERROR), + ("28P01", SqlState::INVALID_PASSWORD), + ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + ("57014", SqlState::QUERY_CANCELED), + ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("23P01", SqlState::EXCLUSION_VIOLATION), + ("42602", SqlState::INVALID_NAME), + ("23001", SqlState::RESTRICT_VIOLATION), + ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), + ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), + ("HV000", SqlState::FDW_ERROR), + ("42611", SqlState::INVALID_COLUMN_DEFINITION), + ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), ("53200", SqlState::OUT_OF_MEMORY), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("53100", SqlState::DISK_FULL), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("3D000", SqlState::INVALID_CATALOG_NAME), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("2200S", SqlState::INVALID_XML_COMMENT), + ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), + ("0A000", SqlState::FEATURE_NOT_SUPPORTED), + ("00000", SqlState::SUCCESSFUL_COMPLETION), + ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), + ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("42P06", SqlState::DUPLICATE_SCHEMA), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("428C9", SqlState::GENERATED_ALWAYS), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("22022", SqlState::INDICATOR_OVERFLOW), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("02000", SqlState::NO_DATA), + ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("P0001", SqlState::RAISE_EXCEPTION), + ("58030", SqlState::IO_ERROR), + ("XX001", SqlState::DATA_CORRUPTED), + ("42723", SqlState::DUPLICATE_FUNCTION), + ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("0F000", SqlState::LOCATOR_EXCEPTION), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("42601", SqlState::SYNTAX_ERROR), + ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), + ("53100", SqlState::DISK_FULL), + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("55006", SqlState::OBJECT_IN_USE), ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), + ("57P02", SqlState::CRASH_SHUTDOWN), + ("21000", SqlState::CARDINALITY_VIOLATION), + ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("42501", 
SqlState::INSUFFICIENT_PRIVILEGE), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("53000", SqlState::INSUFFICIENT_RESOURCES), - ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), - ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("42P20", SqlState::WINDOWING_ERROR), - ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("2200M", SqlState::INVALID_XML_DOCUMENT), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("00000", SqlState::SUCCESSFUL_COMPLETION), - ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), - ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("57P04", SqlState::DATABASE_DROPPED), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("08000", SqlState::CONNECTION_EXCEPTION), + ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), + ("42883", SqlState::UNDEFINED_FUNCTION), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), + ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), ("42P04", SqlState::DUPLICATE_DATABASE), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("42P12", SqlState::INVALID_DATABASE_DEFINITION), + ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), + ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), + ("42846", SqlState::CANNOT_COERCE), + ("08006", SqlState::CONNECTION_FAILURE), + ("42830", SqlState::INVALID_FOREIGN_KEY), ("57P03", SqlState::CANNOT_CONNECT_NOW), + ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), + ("01000", SqlState::WARNING), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("02000", SqlState::NO_DATA), + ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), + ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("58P01", SqlState::UNDEFINED_FILE), + ("22012", SqlState::DIVISION_BY_ZERO), + ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), + ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), + ("3B001", SqlState::S_E_INVALID_SPECIFICATION), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("42803", SqlState::GROUPING_ERROR), + ("3F000", SqlState::INVALID_SCHEMA_NAME), ]), }; diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 83bee86a9..3c8342e07 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -1,4 +1,4 @@ -#![warn(rust_2018_idioms)] 
+#![warn(rust_2018_idioms, clippy::all)] use bytes::{Bytes, IntoBuf}; use futures::{try_ready, Async, Future, Poll, Stream}; @@ -129,6 +129,7 @@ where } } +#[allow(clippy::large_enum_variant)] pub enum AsyncMessage { Notice(DbError), Notification(Notification), @@ -308,6 +309,10 @@ impl Row { self.0.columns() } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn len(&self) -> usize { self.0.len() } diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index 1931773ba..fdc807657 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -236,8 +236,8 @@ impl Client { ); match r { Ok(()) => Ok(buf), - Err(frontend::BindError::Conversion(e)) => return Err(Error::to_sql(e)), - Err(frontend::BindError::Serialization(e)) => return Err(Error::encode(e)), + Err(frontend::BindError::Conversion(e)) => Err(Error::to_sql(e)), + Err(frontend::BindError::Serialization(e)) => Err(Error::encode(e)), } } diff --git a/tokio-postgres/src/proto/connection.rs b/tokio-postgres/src/proto/connection.rs index 4bae09ff5..4684b4f3f 100644 --- a/tokio-postgres/src/proto/connection.rs +++ b/tokio-postgres/src/proto/connection.rs @@ -103,7 +103,7 @@ where let message = match message { Message::NoticeResponse(body) => { - let error = DbError::new(&mut body.fields()).map_err(Error::parse)?; + let error = DbError::parse(&mut body.fields()).map_err(Error::parse)?; return Ok(Some(AsyncMessage::Notice(error))); } Message::NotificationResponse(body) => { diff --git a/tokio-postgres/src/proto/prepare.rs b/tokio-postgres/src/proto/prepare.rs index 368af5740..aa4739ec6 100644 --- a/tokio-postgres/src/proto/prepare.rs +++ b/tokio-postgres/src/proto/prepare.rs @@ -1,3 +1,5 @@ +#![allow(clippy::large_enum_variant)] + use fallible_iterator::FallibleIterator; use futures::sync::mpsc; use futures::{try_ready, Future, Poll, Stream}; diff --git a/tokio-postgres/src/proto/query.rs b/tokio-postgres/src/proto/query.rs index 87474eda1..e648023c3 100644 --- a/tokio-postgres/src/proto/query.rs +++ b/tokio-postgres/src/proto/query.rs @@ -86,7 +86,7 @@ where } Some(Message::ErrorResponse(body)) => break Err(Error::db(body)), Some(Message::DataRow(body)) => { - let row = Row::new(statement.statement().clone(), body)?; + let row = Row::parse(statement.statement().clone(), body)?; self.0 = State::ReadingResponse { receiver, statement, diff --git a/tokio-postgres/src/proto/row.rs b/tokio-postgres/src/proto/row.rs index 5fad43753..ef32b67da 100644 --- a/tokio-postgres/src/proto/row.rs +++ b/tokio-postgres/src/proto/row.rs @@ -12,8 +12,8 @@ pub struct Row { } impl Row { - pub fn new(statement: Statement, data: DataRowBody) -> Result { - let data = RowData::new(data).map_err(Error::parse)?; + pub(crate) fn parse(statement: Statement, data: DataRowBody) -> Result { + let data = RowData::parse(data).map_err(Error::parse)?; Ok(Row { statement, data }) } diff --git a/tokio-postgres/src/proto/transaction.rs b/tokio-postgres/src/proto/transaction.rs index fb828f780..4f5fa3870 100644 --- a/tokio-postgres/src/proto/transaction.rs +++ b/tokio-postgres/src/proto/transaction.rs @@ -63,7 +63,7 @@ where state: &'a mut RentToOwn<'a, Running>, ) -> Poll, E> { match state.future.poll() { - Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::NotReady) => Ok(Async::NotReady), Ok(Async::Ready(t)) => transition!(Finishing { future: state.client.batch_execute("COMMIT"), result: Ok(t), @@ -79,7 +79,7 @@ where state: &'a mut RentToOwn<'a, Finishing>, ) -> Poll, E> { match 
state.future.poll() { - Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::NotReady) => Ok(Async::NotReady), Ok(Async::Ready(())) => { let t = state.take().result?; transition!(Finished(t)) diff --git a/tokio-postgres/src/proto/typeinfo.rs b/tokio-postgres/src/proto/typeinfo.rs index 78a62611a..416f61d41 100644 --- a/tokio-postgres/src/proto/typeinfo.rs +++ b/tokio-postgres/src/proto/typeinfo.rs @@ -12,7 +12,7 @@ use crate::proto::typeinfo_composite::TypeinfoCompositeFuture; use crate::proto::typeinfo_enum::TypeinfoEnumFuture; use crate::types::{Kind, Oid, Type}; -const TYPEINFO_QUERY: &'static str = " +const TYPEINFO_QUERY: &str = " SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid FROM pg_catalog.pg_type t LEFT OUTER JOIN pg_catalog.pg_range r ON r.rngtypid = t.oid @@ -21,7 +21,7 @@ WHERE t.oid = $1 "; // Range types weren't added until Postgres 9.2, so pg_range may not exist -const TYPEINFO_FALLBACK_QUERY: &'static str = " +const TYPEINFO_FALLBACK_QUERY: &str = " SELECT t.typname, t.typtype, t.typelem, NULL::OID, t.typbasetype, n.nspname, t.typrelid FROM pg_catalog.pg_type t INNER JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid diff --git a/tokio-postgres/src/proto/typeinfo_composite.rs b/tokio-postgres/src/proto/typeinfo_composite.rs index 9df260a48..ca24f2e01 100644 --- a/tokio-postgres/src/proto/typeinfo_composite.rs +++ b/tokio-postgres/src/proto/typeinfo_composite.rs @@ -13,7 +13,7 @@ use crate::proto::statement::Statement; use crate::proto::typeinfo::TypeinfoFuture; use crate::types::{Field, Oid}; -const TYPEINFO_COMPOSITE_QUERY: &'static str = " +const TYPEINFO_COMPOSITE_QUERY: &str = " SELECT attname, atttypid FROM pg_catalog.pg_attribute WHERE attrelid = $1 diff --git a/tokio-postgres/src/proto/typeinfo_enum.rs b/tokio-postgres/src/proto/typeinfo_enum.rs index 2f676f800..2cf3d7f88 100644 --- a/tokio-postgres/src/proto/typeinfo_enum.rs +++ b/tokio-postgres/src/proto/typeinfo_enum.rs @@ -10,7 +10,7 @@ use crate::proto::query::QueryStream; use crate::proto::statement::Statement; use crate::types::Oid; -const TYPEINFO_ENUM_QUERY: &'static str = " +const TYPEINFO_ENUM_QUERY: &str = " SELECT enumlabel FROM pg_catalog.pg_enum WHERE enumtypid = $1 @@ -18,7 +18,7 @@ ORDER BY enumsortorder "; // Postgres 9.0 didn't have enumsortorder -const TYPEINFO_ENUM_FALLBACK_QUERY: &'static str = " +const TYPEINFO_ENUM_FALLBACK_QUERY: &str = " SELECT enumlabel FROM pg_catalog.pg_enum WHERE enumtypid = $1 diff --git a/tokio-postgres/src/rows.rs b/tokio-postgres/src/rows.rs index 54b934b5c..0360dfa55 100644 --- a/tokio-postgres/src/rows.rs +++ b/tokio-postgres/src/rows.rs @@ -61,29 +61,18 @@ where impl<'a, T> RowIndex for &'a T where T: ?Sized + Sealed {} -#[doc(hidden)] -pub struct RowData { +pub(crate) struct RowData { body: DataRowBody, ranges: Vec>>, } impl RowData { - pub fn new(body: DataRowBody) -> io::Result { + pub fn parse(body: DataRowBody) -> io::Result { let ranges = body.ranges().collect()?; - Ok(RowData { - body: body, - ranges: ranges, - }) - } - - pub fn len(&self) -> usize { - self.ranges.len() + Ok(RowData { body, ranges }) } pub fn get(&self, index: usize) -> Option<&[u8]> { - match &self.ranges[index] { - &Some(ref range) => Some(&self.body.buffer()[range.clone()]), - &None => None, - } + self.ranges[index].clone().map(|r| &self.body.buffer()[r]) } } diff --git a/tokio-postgres/src/stmt.rs b/tokio-postgres/src/stmt.rs index 037706d2d..be6cc9f49 100644 --- a/tokio-postgres/src/stmt.rs +++ b/tokio-postgres/src/stmt.rs @@ 
-10,10 +10,7 @@ pub struct Column { impl Column { #[doc(hidden)] pub fn new(name: String, type_: Type) -> Column { - Column { - name: name, - type_: type_, - } + Column { name, type_ } } /// Returns the name of the column. diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 6b212ea77..d9580c187 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -7,6 +7,7 @@ use std::borrow::Cow; use std::collections::HashMap; use std::error::Error; use std::fmt; +use std::hash::BuildHasher; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -18,7 +19,7 @@ pub use postgres_protocol::Oid; pub use crate::types::special::{Date, Timestamp}; // Number of seconds from 1970-01-01 to 2000-01-01 -const TIME_SEC_CONVERSION: u64 = 946684800; +const TIME_SEC_CONVERSION: u64 = 946_684_800; const USEC_PER_SEC: u64 = 1_000_000; const NSEC_PER_USEC: u64 = 1_000; @@ -109,10 +110,10 @@ impl Type { #[doc(hidden)] pub fn _new(name: String, oid: Oid, kind: Kind, schema: String) -> Type { Type(Inner::Other(Arc::new(Other { - name: name, - oid: oid, - kind: kind, - schema: schema, + name, + oid, + kind, + schema, }))) } @@ -189,10 +190,7 @@ impl Field { impl Field { #[doc(hidden)] pub fn new(name: String, type_: Type) -> Field { - Field { - name: name, - type_: type_, - } + Field { name, type_ } } } @@ -440,11 +438,14 @@ simple_from!(i64, int8_from_sql, INT8); simple_from!(f32, float4_from_sql, FLOAT4); simple_from!(f64, float8_from_sql, FLOAT8); -impl<'a> FromSql<'a> for HashMap> { +impl<'a, S> FromSql<'a> for HashMap, S> +where + S: Default + BuildHasher, +{ fn from_sql( _: &Type, raw: &'a [u8], - ) -> Result>, Box> { + ) -> Result, S>, Box> { types::hstore_from_sql(raw)? .map(|(k, v)| (k.to_owned(), v.map(str::to_owned))) .collect() @@ -739,7 +740,10 @@ simple_to!(i64, int8_to_sql, INT8); simple_to!(f32, float4_to_sql, FLOAT4); simple_to!(f64, float8_to_sql, FLOAT8); -impl ToSql for HashMap> { +impl ToSql for HashMap, H> +where + H: BuildHasher, +{ fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { types::hstore_to_sql( self.iter().map(|(k, v)| (&**k, v.as_ref().map(|v| &**v))), @@ -760,7 +764,7 @@ impl ToSql for SystemTime { let epoch = UNIX_EPOCH + Duration::from_secs(TIME_SEC_CONVERSION); let to_usec = - |d: Duration| d.as_secs() * USEC_PER_SEC + (d.subsec_nanos() as u64) / NSEC_PER_USEC; + |d: Duration| d.as_secs() * USEC_PER_SEC + u64::from(d.subsec_nanos()) / NSEC_PER_USEC; let time = match self.duration_since(epoch) { Ok(duration) => to_usec(duration) as i64, diff --git a/tokio-postgres/src/types/type_gen.rs b/tokio-postgres/src/types/type_gen.rs index 7dd28f42e..32fcd241e 100644 --- a/tokio-postgres/src/types/type_gen.rs +++ b/tokio-postgres/src/types/type_gen.rs @@ -1,7 +1,7 @@ // Autogenerated file - DO NOT EDIT use std::sync::Arc; -use crate::types::{Kind, Oid, Type}; +use crate::types::{Type, Oid, Kind}; #[derive(PartialEq, Eq, Debug)] pub struct Other { @@ -521,656 +521,493 @@ impl Inner { pub fn kind(&self) -> &Kind { match *self { Inner::Bool => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Bytea => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Char => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Name => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Int8 => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Int2 => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } 
Inner::Int2Vector => { - const V: &'static Kind = &Kind::Array(Type(Inner::Int2)); - V + &Kind::Array(Type(Inner::Int2)) } Inner::Int4 => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Regproc => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Text => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Oid => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Tid => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Xid => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Cid => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::OidVector => { - const V: &'static Kind = &Kind::Array(Type(Inner::Oid)); - V + &Kind::Array(Type(Inner::Oid)) } Inner::PgDdlCommand => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Json => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Xml => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::XmlArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Xml)); - V + &Kind::Array(Type(Inner::Xml)) } Inner::PgNodeTree => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::JsonArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Json)); - V + &Kind::Array(Type(Inner::Json)) } Inner::Smgr => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::IndexAmHandler => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Point => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Lseg => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Path => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Box => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Polygon => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Line => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::LineArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Line)); - V + &Kind::Array(Type(Inner::Line)) } Inner::Cidr => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::CidrArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Cidr)); - V + &Kind::Array(Type(Inner::Cidr)) } Inner::Float4 => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Float8 => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Abstime => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Reltime => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Tinterval => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Unknown => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Circle => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::CircleArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Circle)); - V + &Kind::Array(Type(Inner::Circle)) } Inner::Macaddr8 => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Macaddr8Array => { - const V: &'static Kind = &Kind::Array(Type(Inner::Macaddr8)); - V + &Kind::Array(Type(Inner::Macaddr8)) } Inner::Money => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::MoneyArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Money)); - V + &Kind::Array(Type(Inner::Money)) } Inner::Macaddr => { - const V: &'static 
Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Inet => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::BoolArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Bool)); - V + &Kind::Array(Type(Inner::Bool)) } Inner::ByteaArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Bytea)); - V + &Kind::Array(Type(Inner::Bytea)) } Inner::CharArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Char)); - V + &Kind::Array(Type(Inner::Char)) } Inner::NameArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Name)); - V + &Kind::Array(Type(Inner::Name)) } Inner::Int2Array => { - const V: &'static Kind = &Kind::Array(Type(Inner::Int2)); - V + &Kind::Array(Type(Inner::Int2)) } Inner::Int2VectorArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Int2Vector)); - V + &Kind::Array(Type(Inner::Int2Vector)) } Inner::Int4Array => { - const V: &'static Kind = &Kind::Array(Type(Inner::Int4)); - V + &Kind::Array(Type(Inner::Int4)) } Inner::RegprocArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regproc)); - V + &Kind::Array(Type(Inner::Regproc)) } Inner::TextArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Text)); - V + &Kind::Array(Type(Inner::Text)) } Inner::TidArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Tid)); - V + &Kind::Array(Type(Inner::Tid)) } Inner::XidArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Xid)); - V + &Kind::Array(Type(Inner::Xid)) } Inner::CidArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Cid)); - V + &Kind::Array(Type(Inner::Cid)) } Inner::OidVectorArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::OidVector)); - V + &Kind::Array(Type(Inner::OidVector)) } Inner::BpcharArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Bpchar)); - V + &Kind::Array(Type(Inner::Bpchar)) } Inner::VarcharArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Varchar)); - V + &Kind::Array(Type(Inner::Varchar)) } Inner::Int8Array => { - const V: &'static Kind = &Kind::Array(Type(Inner::Int8)); - V + &Kind::Array(Type(Inner::Int8)) } Inner::PointArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Point)); - V + &Kind::Array(Type(Inner::Point)) } Inner::LsegArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Lseg)); - V + &Kind::Array(Type(Inner::Lseg)) } Inner::PathArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Path)); - V + &Kind::Array(Type(Inner::Path)) } Inner::BoxArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Box)); - V + &Kind::Array(Type(Inner::Box)) } Inner::Float4Array => { - const V: &'static Kind = &Kind::Array(Type(Inner::Float4)); - V + &Kind::Array(Type(Inner::Float4)) } Inner::Float8Array => { - const V: &'static Kind = &Kind::Array(Type(Inner::Float8)); - V + &Kind::Array(Type(Inner::Float8)) } Inner::AbstimeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Abstime)); - V + &Kind::Array(Type(Inner::Abstime)) } Inner::ReltimeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Reltime)); - V + &Kind::Array(Type(Inner::Reltime)) } Inner::TintervalArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Tinterval)); - V + &Kind::Array(Type(Inner::Tinterval)) } Inner::PolygonArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Polygon)); - V + &Kind::Array(Type(Inner::Polygon)) } Inner::OidArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Oid)); - V + &Kind::Array(Type(Inner::Oid)) } Inner::Aclitem => { - const V: 
&'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::AclitemArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Aclitem)); - V + &Kind::Array(Type(Inner::Aclitem)) } Inner::MacaddrArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Macaddr)); - V + &Kind::Array(Type(Inner::Macaddr)) } Inner::InetArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Inet)); - V + &Kind::Array(Type(Inner::Inet)) } Inner::Bpchar => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Varchar => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Date => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Time => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Timestamp => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::TimestampArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Timestamp)); - V + &Kind::Array(Type(Inner::Timestamp)) } Inner::DateArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Date)); - V + &Kind::Array(Type(Inner::Date)) } Inner::TimeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Time)); - V + &Kind::Array(Type(Inner::Time)) } Inner::Timestamptz => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::TimestamptzArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Timestamptz)); - V + &Kind::Array(Type(Inner::Timestamptz)) } Inner::Interval => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::IntervalArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Interval)); - V + &Kind::Array(Type(Inner::Interval)) } Inner::NumericArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Numeric)); - V + &Kind::Array(Type(Inner::Numeric)) } Inner::CstringArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Cstring)); - V + &Kind::Array(Type(Inner::Cstring)) } Inner::Timetz => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::TimetzArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Timetz)); - V + &Kind::Array(Type(Inner::Timetz)) } Inner::Bit => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::BitArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Bit)); - V + &Kind::Array(Type(Inner::Bit)) } Inner::Varbit => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::VarbitArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Varbit)); - V + &Kind::Array(Type(Inner::Varbit)) } Inner::Numeric => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Refcursor => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::RefcursorArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Refcursor)); - V + &Kind::Array(Type(Inner::Refcursor)) } Inner::Regprocedure => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Regoper => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Regoperator => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Regclass => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Regtype => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::RegprocedureArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regprocedure)); - V + &Kind::Array(Type(Inner::Regprocedure)) } Inner::RegoperArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regoper)); - V + 
&Kind::Array(Type(Inner::Regoper)) } Inner::RegoperatorArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regoperator)); - V + &Kind::Array(Type(Inner::Regoperator)) } Inner::RegclassArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regclass)); - V + &Kind::Array(Type(Inner::Regclass)) } Inner::RegtypeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regtype)); - V + &Kind::Array(Type(Inner::Regtype)) } Inner::Record => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Cstring => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Any => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Anyarray => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Void => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Trigger => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::LanguageHandler => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Internal => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Opaque => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Anyelement => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::RecordArray => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Anynonarray => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::TxidSnapshotArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::TxidSnapshot)); - V + &Kind::Array(Type(Inner::TxidSnapshot)) } Inner::Uuid => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::UuidArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Uuid)); - V + &Kind::Array(Type(Inner::Uuid)) } Inner::TxidSnapshot => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::FdwHandler => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::PgLsn => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::PgLsnArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::PgLsn)); - V + &Kind::Array(Type(Inner::PgLsn)) } Inner::TsmHandler => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::PgNdistinct => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::PgDependencies => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Anyenum => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::TsVector => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::Tsquery => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::GtsVector => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::TsVectorArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::TsVector)); - V + &Kind::Array(Type(Inner::TsVector)) } Inner::GtsVectorArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::GtsVector)); - V + &Kind::Array(Type(Inner::GtsVector)) } Inner::TsqueryArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Tsquery)); - V + &Kind::Array(Type(Inner::Tsquery)) } Inner::Regconfig => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::RegconfigArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regconfig)); - V + &Kind::Array(Type(Inner::Regconfig)) } Inner::Regdictionary => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::RegdictionaryArray => { - 
const V: &'static Kind = &Kind::Array(Type(Inner::Regdictionary)); - V + &Kind::Array(Type(Inner::Regdictionary)) } Inner::Jsonb => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::JsonbArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Jsonb)); - V + &Kind::Array(Type(Inner::Jsonb)) } Inner::AnyRange => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::EventTrigger => { - const V: &'static Kind = &Kind::Pseudo; - V + &Kind::Pseudo } Inner::Int4Range => { - const V: &'static Kind = &Kind::Range(Type(Inner::Int4)); - V + &Kind::Range(Type(Inner::Int4)) } Inner::Int4RangeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Int4Range)); - V + &Kind::Array(Type(Inner::Int4Range)) } Inner::NumRange => { - const V: &'static Kind = &Kind::Range(Type(Inner::Numeric)); - V + &Kind::Range(Type(Inner::Numeric)) } Inner::NumRangeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::NumRange)); - V + &Kind::Array(Type(Inner::NumRange)) } Inner::TsRange => { - const V: &'static Kind = &Kind::Range(Type(Inner::Timestamp)); - V + &Kind::Range(Type(Inner::Timestamp)) } Inner::TsRangeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::TsRange)); - V + &Kind::Array(Type(Inner::TsRange)) } Inner::TstzRange => { - const V: &'static Kind = &Kind::Range(Type(Inner::Timestamptz)); - V + &Kind::Range(Type(Inner::Timestamptz)) } Inner::TstzRangeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::TstzRange)); - V + &Kind::Array(Type(Inner::TstzRange)) } Inner::DateRange => { - const V: &'static Kind = &Kind::Range(Type(Inner::Date)); - V + &Kind::Range(Type(Inner::Date)) } Inner::DateRangeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::DateRange)); - V + &Kind::Array(Type(Inner::DateRange)) } Inner::Int8Range => { - const V: &'static Kind = &Kind::Range(Type(Inner::Int8)); - V + &Kind::Range(Type(Inner::Int8)) } Inner::Int8RangeArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Int8Range)); - V + &Kind::Array(Type(Inner::Int8Range)) } Inner::Regnamespace => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::RegnamespaceArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regnamespace)); - V + &Kind::Array(Type(Inner::Regnamespace)) } Inner::Regrole => { - const V: &'static Kind = &Kind::Simple; - V + &Kind::Simple } Inner::RegroleArray => { - const V: &'static Kind = &Kind::Array(Type(Inner::Regrole)); - V + &Kind::Array(Type(Inner::Regrole)) } Inner::Other(ref u) => &u.kind, } @@ -1834,4 +1671,4 @@ impl Type { /// REGROLE[] pub const REGROLE_ARRAY: Type = Type(Inner::RegroleArray); -} +} \ No newline at end of file From fa8fa1a75b9cf47f60fb048811e1faacff7140a8 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 9 Dec 2018 21:44:59 -0800 Subject: [PATCH 040/819] rustfmt --- tokio-postgres/src/error/sqlstate.rs | 45 +- tokio-postgres/src/types/type_gen.rs | 656 +++++++-------------------- 2 files changed, 195 insertions(+), 506 deletions(-) diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index aef7b7967..dc33991fd 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -32,7 +32,8 @@ impl SqlState { pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Cow::Borrowed("01008")); /// 01003 - pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = SqlState(Cow::Borrowed("01003")); + pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = + 
SqlState(Cow::Borrowed("01003")); /// 01007 pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Cow::Borrowed("01007")); @@ -50,7 +51,8 @@ impl SqlState { pub const NO_DATA: SqlState = SqlState(Cow::Borrowed("02000")); /// 02001 - pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Cow::Borrowed("02001")); + pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = + SqlState(Cow::Borrowed("02001")); /// 03000 pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Cow::Borrowed("03000")); @@ -65,10 +67,12 @@ impl SqlState { pub const CONNECTION_FAILURE: SqlState = SqlState(Cow::Borrowed("08006")); /// 08001 - pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = SqlState(Cow::Borrowed("08001")); + pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = + SqlState(Cow::Borrowed("08001")); /// 08004 - pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = SqlState(Cow::Borrowed("08004")); + pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = + SqlState(Cow::Borrowed("08004")); /// 08007 pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("08007")); @@ -104,7 +108,8 @@ impl SqlState { pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0Z000")); /// 0Z002 - pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = SqlState(Cow::Borrowed("0Z002")); + pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = + SqlState(Cow::Borrowed("0Z002")); /// 20000 pub const CASE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("20000")); @@ -158,7 +163,8 @@ impl SqlState { pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201F")); /// 2201G - pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201G")); + pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = + SqlState(Cow::Borrowed("2201G")); /// 22018 pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Cow::Borrowed("22018")); @@ -191,7 +197,8 @@ impl SqlState { pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201W")); /// 2201X - pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201X")); + pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = + SqlState(Cow::Borrowed("2201X")); /// 2202H pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = SqlState(Cow::Borrowed("2202H")); @@ -302,22 +309,27 @@ impl SqlState { pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Cow::Borrowed("25002")); /// 25008 - pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = SqlState(Cow::Borrowed("25008")); + pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = + SqlState(Cow::Borrowed("25008")); /// 25003 - pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25003")); + pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25003")); /// 25004 - pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25004")); + pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25004")); /// 25005 - pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25005")); + pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Cow::Borrowed("25005")); /// 25006 pub const 
READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25006")); /// 25007 - pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = SqlState(Cow::Borrowed("25007")); + pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = + SqlState(Cow::Borrowed("25007")); /// 25P01 pub const NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P01")); @@ -344,7 +356,8 @@ impl SqlState { pub const INVALID_PASSWORD: SqlState = SqlState(Cow::Borrowed("28P01")); /// 2B000 - pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2B000")); + pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = + SqlState(Cow::Borrowed("2B000")); /// 2BP01 pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2BP01")); @@ -356,7 +369,8 @@ impl SqlState { pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("2F000")); /// 2F005 - pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = SqlState(Cow::Borrowed("2F005")); + pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = + SqlState(Cow::Borrowed("2F005")); /// 2F002 pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F002")); @@ -698,7 +712,8 @@ impl SqlState { pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Cow::Borrowed("HV00D")); /// HV090 - pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = SqlState(Cow::Borrowed("HV090")); + pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = + SqlState(Cow::Borrowed("HV090")); /// HV00A pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Cow::Borrowed("HV00A")); diff --git a/tokio-postgres/src/types/type_gen.rs b/tokio-postgres/src/types/type_gen.rs index 32fcd241e..f5b1b19c6 100644 --- a/tokio-postgres/src/types/type_gen.rs +++ b/tokio-postgres/src/types/type_gen.rs @@ -1,7 +1,7 @@ // Autogenerated file - DO NOT EDIT use std::sync::Arc; -use crate::types::{Type, Oid, Kind}; +use crate::types::{Kind, Oid, Type}; #[derive(PartialEq, Eq, Debug)] pub struct Other { @@ -520,495 +520,169 @@ impl Inner { pub fn kind(&self) -> &Kind { match *self { - Inner::Bool => { - &Kind::Simple - } - Inner::Bytea => { - &Kind::Simple - } - Inner::Char => { - &Kind::Simple - } - Inner::Name => { - &Kind::Simple - } - Inner::Int8 => { - &Kind::Simple - } - Inner::Int2 => { - &Kind::Simple - } - Inner::Int2Vector => { - &Kind::Array(Type(Inner::Int2)) - } - Inner::Int4 => { - &Kind::Simple - } - Inner::Regproc => { - &Kind::Simple - } - Inner::Text => { - &Kind::Simple - } - Inner::Oid => { - &Kind::Simple - } - Inner::Tid => { - &Kind::Simple - } - Inner::Xid => { - &Kind::Simple - } - Inner::Cid => { - &Kind::Simple - } - Inner::OidVector => { - &Kind::Array(Type(Inner::Oid)) - } - Inner::PgDdlCommand => { - &Kind::Pseudo - } - Inner::Json => { - &Kind::Simple - } - Inner::Xml => { - &Kind::Simple - } - Inner::XmlArray => { - &Kind::Array(Type(Inner::Xml)) - } - Inner::PgNodeTree => { - &Kind::Simple - } - Inner::JsonArray => { - &Kind::Array(Type(Inner::Json)) - } - Inner::Smgr => { - &Kind::Simple - } - Inner::IndexAmHandler => { - &Kind::Pseudo - } - Inner::Point => { - &Kind::Simple - } - Inner::Lseg => { - &Kind::Simple - } - Inner::Path => { - &Kind::Simple - } - Inner::Box => { - &Kind::Simple - } - Inner::Polygon => { - &Kind::Simple - } - Inner::Line => { - &Kind::Simple - } - Inner::LineArray => { - &Kind::Array(Type(Inner::Line)) - } - Inner::Cidr => { - &Kind::Simple - } - Inner::CidrArray => { - 
&Kind::Array(Type(Inner::Cidr)) - } - Inner::Float4 => { - &Kind::Simple - } - Inner::Float8 => { - &Kind::Simple - } - Inner::Abstime => { - &Kind::Simple - } - Inner::Reltime => { - &Kind::Simple - } - Inner::Tinterval => { - &Kind::Simple - } - Inner::Unknown => { - &Kind::Simple - } - Inner::Circle => { - &Kind::Simple - } - Inner::CircleArray => { - &Kind::Array(Type(Inner::Circle)) - } - Inner::Macaddr8 => { - &Kind::Simple - } - Inner::Macaddr8Array => { - &Kind::Array(Type(Inner::Macaddr8)) - } - Inner::Money => { - &Kind::Simple - } - Inner::MoneyArray => { - &Kind::Array(Type(Inner::Money)) - } - Inner::Macaddr => { - &Kind::Simple - } - Inner::Inet => { - &Kind::Simple - } - Inner::BoolArray => { - &Kind::Array(Type(Inner::Bool)) - } - Inner::ByteaArray => { - &Kind::Array(Type(Inner::Bytea)) - } - Inner::CharArray => { - &Kind::Array(Type(Inner::Char)) - } - Inner::NameArray => { - &Kind::Array(Type(Inner::Name)) - } - Inner::Int2Array => { - &Kind::Array(Type(Inner::Int2)) - } - Inner::Int2VectorArray => { - &Kind::Array(Type(Inner::Int2Vector)) - } - Inner::Int4Array => { - &Kind::Array(Type(Inner::Int4)) - } - Inner::RegprocArray => { - &Kind::Array(Type(Inner::Regproc)) - } - Inner::TextArray => { - &Kind::Array(Type(Inner::Text)) - } - Inner::TidArray => { - &Kind::Array(Type(Inner::Tid)) - } - Inner::XidArray => { - &Kind::Array(Type(Inner::Xid)) - } - Inner::CidArray => { - &Kind::Array(Type(Inner::Cid)) - } - Inner::OidVectorArray => { - &Kind::Array(Type(Inner::OidVector)) - } - Inner::BpcharArray => { - &Kind::Array(Type(Inner::Bpchar)) - } - Inner::VarcharArray => { - &Kind::Array(Type(Inner::Varchar)) - } - Inner::Int8Array => { - &Kind::Array(Type(Inner::Int8)) - } - Inner::PointArray => { - &Kind::Array(Type(Inner::Point)) - } - Inner::LsegArray => { - &Kind::Array(Type(Inner::Lseg)) - } - Inner::PathArray => { - &Kind::Array(Type(Inner::Path)) - } - Inner::BoxArray => { - &Kind::Array(Type(Inner::Box)) - } - Inner::Float4Array => { - &Kind::Array(Type(Inner::Float4)) - } - Inner::Float8Array => { - &Kind::Array(Type(Inner::Float8)) - } - Inner::AbstimeArray => { - &Kind::Array(Type(Inner::Abstime)) - } - Inner::ReltimeArray => { - &Kind::Array(Type(Inner::Reltime)) - } - Inner::TintervalArray => { - &Kind::Array(Type(Inner::Tinterval)) - } - Inner::PolygonArray => { - &Kind::Array(Type(Inner::Polygon)) - } - Inner::OidArray => { - &Kind::Array(Type(Inner::Oid)) - } - Inner::Aclitem => { - &Kind::Simple - } - Inner::AclitemArray => { - &Kind::Array(Type(Inner::Aclitem)) - } - Inner::MacaddrArray => { - &Kind::Array(Type(Inner::Macaddr)) - } - Inner::InetArray => { - &Kind::Array(Type(Inner::Inet)) - } - Inner::Bpchar => { - &Kind::Simple - } - Inner::Varchar => { - &Kind::Simple - } - Inner::Date => { - &Kind::Simple - } - Inner::Time => { - &Kind::Simple - } - Inner::Timestamp => { - &Kind::Simple - } - Inner::TimestampArray => { - &Kind::Array(Type(Inner::Timestamp)) - } - Inner::DateArray => { - &Kind::Array(Type(Inner::Date)) - } - Inner::TimeArray => { - &Kind::Array(Type(Inner::Time)) - } - Inner::Timestamptz => { - &Kind::Simple - } - Inner::TimestamptzArray => { - &Kind::Array(Type(Inner::Timestamptz)) - } - Inner::Interval => { - &Kind::Simple - } - Inner::IntervalArray => { - &Kind::Array(Type(Inner::Interval)) - } - Inner::NumericArray => { - &Kind::Array(Type(Inner::Numeric)) - } - Inner::CstringArray => { - &Kind::Array(Type(Inner::Cstring)) - } - Inner::Timetz => { - &Kind::Simple - } - Inner::TimetzArray => { - &Kind::Array(Type(Inner::Timetz)) - } 
- Inner::Bit => { - &Kind::Simple - } - Inner::BitArray => { - &Kind::Array(Type(Inner::Bit)) - } - Inner::Varbit => { - &Kind::Simple - } - Inner::VarbitArray => { - &Kind::Array(Type(Inner::Varbit)) - } - Inner::Numeric => { - &Kind::Simple - } - Inner::Refcursor => { - &Kind::Simple - } - Inner::RefcursorArray => { - &Kind::Array(Type(Inner::Refcursor)) - } - Inner::Regprocedure => { - &Kind::Simple - } - Inner::Regoper => { - &Kind::Simple - } - Inner::Regoperator => { - &Kind::Simple - } - Inner::Regclass => { - &Kind::Simple - } - Inner::Regtype => { - &Kind::Simple - } - Inner::RegprocedureArray => { - &Kind::Array(Type(Inner::Regprocedure)) - } - Inner::RegoperArray => { - &Kind::Array(Type(Inner::Regoper)) - } - Inner::RegoperatorArray => { - &Kind::Array(Type(Inner::Regoperator)) - } - Inner::RegclassArray => { - &Kind::Array(Type(Inner::Regclass)) - } - Inner::RegtypeArray => { - &Kind::Array(Type(Inner::Regtype)) - } - Inner::Record => { - &Kind::Pseudo - } - Inner::Cstring => { - &Kind::Pseudo - } - Inner::Any => { - &Kind::Pseudo - } - Inner::Anyarray => { - &Kind::Pseudo - } - Inner::Void => { - &Kind::Pseudo - } - Inner::Trigger => { - &Kind::Pseudo - } - Inner::LanguageHandler => { - &Kind::Pseudo - } - Inner::Internal => { - &Kind::Pseudo - } - Inner::Opaque => { - &Kind::Pseudo - } - Inner::Anyelement => { - &Kind::Pseudo - } - Inner::RecordArray => { - &Kind::Pseudo - } - Inner::Anynonarray => { - &Kind::Pseudo - } - Inner::TxidSnapshotArray => { - &Kind::Array(Type(Inner::TxidSnapshot)) - } - Inner::Uuid => { - &Kind::Simple - } - Inner::UuidArray => { - &Kind::Array(Type(Inner::Uuid)) - } - Inner::TxidSnapshot => { - &Kind::Simple - } - Inner::FdwHandler => { - &Kind::Pseudo - } - Inner::PgLsn => { - &Kind::Simple - } - Inner::PgLsnArray => { - &Kind::Array(Type(Inner::PgLsn)) - } - Inner::TsmHandler => { - &Kind::Pseudo - } - Inner::PgNdistinct => { - &Kind::Simple - } - Inner::PgDependencies => { - &Kind::Simple - } - Inner::Anyenum => { - &Kind::Pseudo - } - Inner::TsVector => { - &Kind::Simple - } - Inner::Tsquery => { - &Kind::Simple - } - Inner::GtsVector => { - &Kind::Simple - } - Inner::TsVectorArray => { - &Kind::Array(Type(Inner::TsVector)) - } - Inner::GtsVectorArray => { - &Kind::Array(Type(Inner::GtsVector)) - } - Inner::TsqueryArray => { - &Kind::Array(Type(Inner::Tsquery)) - } - Inner::Regconfig => { - &Kind::Simple - } - Inner::RegconfigArray => { - &Kind::Array(Type(Inner::Regconfig)) - } - Inner::Regdictionary => { - &Kind::Simple - } - Inner::RegdictionaryArray => { - &Kind::Array(Type(Inner::Regdictionary)) - } - Inner::Jsonb => { - &Kind::Simple - } - Inner::JsonbArray => { - &Kind::Array(Type(Inner::Jsonb)) - } - Inner::AnyRange => { - &Kind::Pseudo - } - Inner::EventTrigger => { - &Kind::Pseudo - } - Inner::Int4Range => { - &Kind::Range(Type(Inner::Int4)) - } - Inner::Int4RangeArray => { - &Kind::Array(Type(Inner::Int4Range)) - } - Inner::NumRange => { - &Kind::Range(Type(Inner::Numeric)) - } - Inner::NumRangeArray => { - &Kind::Array(Type(Inner::NumRange)) - } - Inner::TsRange => { - &Kind::Range(Type(Inner::Timestamp)) - } - Inner::TsRangeArray => { - &Kind::Array(Type(Inner::TsRange)) - } - Inner::TstzRange => { - &Kind::Range(Type(Inner::Timestamptz)) - } - Inner::TstzRangeArray => { - &Kind::Array(Type(Inner::TstzRange)) - } - Inner::DateRange => { - &Kind::Range(Type(Inner::Date)) - } - Inner::DateRangeArray => { - &Kind::Array(Type(Inner::DateRange)) - } - Inner::Int8Range => { - &Kind::Range(Type(Inner::Int8)) - } - Inner::Int8RangeArray 
=> { - &Kind::Array(Type(Inner::Int8Range)) - } - Inner::Regnamespace => { - &Kind::Simple - } - Inner::RegnamespaceArray => { - &Kind::Array(Type(Inner::Regnamespace)) - } - Inner::Regrole => { - &Kind::Simple - } - Inner::RegroleArray => { - &Kind::Array(Type(Inner::Regrole)) - } + Inner::Bool => &Kind::Simple, + Inner::Bytea => &Kind::Simple, + Inner::Char => &Kind::Simple, + Inner::Name => &Kind::Simple, + Inner::Int8 => &Kind::Simple, + Inner::Int2 => &Kind::Simple, + Inner::Int2Vector => &Kind::Array(Type(Inner::Int2)), + Inner::Int4 => &Kind::Simple, + Inner::Regproc => &Kind::Simple, + Inner::Text => &Kind::Simple, + Inner::Oid => &Kind::Simple, + Inner::Tid => &Kind::Simple, + Inner::Xid => &Kind::Simple, + Inner::Cid => &Kind::Simple, + Inner::OidVector => &Kind::Array(Type(Inner::Oid)), + Inner::PgDdlCommand => &Kind::Pseudo, + Inner::Json => &Kind::Simple, + Inner::Xml => &Kind::Simple, + Inner::XmlArray => &Kind::Array(Type(Inner::Xml)), + Inner::PgNodeTree => &Kind::Simple, + Inner::JsonArray => &Kind::Array(Type(Inner::Json)), + Inner::Smgr => &Kind::Simple, + Inner::IndexAmHandler => &Kind::Pseudo, + Inner::Point => &Kind::Simple, + Inner::Lseg => &Kind::Simple, + Inner::Path => &Kind::Simple, + Inner::Box => &Kind::Simple, + Inner::Polygon => &Kind::Simple, + Inner::Line => &Kind::Simple, + Inner::LineArray => &Kind::Array(Type(Inner::Line)), + Inner::Cidr => &Kind::Simple, + Inner::CidrArray => &Kind::Array(Type(Inner::Cidr)), + Inner::Float4 => &Kind::Simple, + Inner::Float8 => &Kind::Simple, + Inner::Abstime => &Kind::Simple, + Inner::Reltime => &Kind::Simple, + Inner::Tinterval => &Kind::Simple, + Inner::Unknown => &Kind::Simple, + Inner::Circle => &Kind::Simple, + Inner::CircleArray => &Kind::Array(Type(Inner::Circle)), + Inner::Macaddr8 => &Kind::Simple, + Inner::Macaddr8Array => &Kind::Array(Type(Inner::Macaddr8)), + Inner::Money => &Kind::Simple, + Inner::MoneyArray => &Kind::Array(Type(Inner::Money)), + Inner::Macaddr => &Kind::Simple, + Inner::Inet => &Kind::Simple, + Inner::BoolArray => &Kind::Array(Type(Inner::Bool)), + Inner::ByteaArray => &Kind::Array(Type(Inner::Bytea)), + Inner::CharArray => &Kind::Array(Type(Inner::Char)), + Inner::NameArray => &Kind::Array(Type(Inner::Name)), + Inner::Int2Array => &Kind::Array(Type(Inner::Int2)), + Inner::Int2VectorArray => &Kind::Array(Type(Inner::Int2Vector)), + Inner::Int4Array => &Kind::Array(Type(Inner::Int4)), + Inner::RegprocArray => &Kind::Array(Type(Inner::Regproc)), + Inner::TextArray => &Kind::Array(Type(Inner::Text)), + Inner::TidArray => &Kind::Array(Type(Inner::Tid)), + Inner::XidArray => &Kind::Array(Type(Inner::Xid)), + Inner::CidArray => &Kind::Array(Type(Inner::Cid)), + Inner::OidVectorArray => &Kind::Array(Type(Inner::OidVector)), + Inner::BpcharArray => &Kind::Array(Type(Inner::Bpchar)), + Inner::VarcharArray => &Kind::Array(Type(Inner::Varchar)), + Inner::Int8Array => &Kind::Array(Type(Inner::Int8)), + Inner::PointArray => &Kind::Array(Type(Inner::Point)), + Inner::LsegArray => &Kind::Array(Type(Inner::Lseg)), + Inner::PathArray => &Kind::Array(Type(Inner::Path)), + Inner::BoxArray => &Kind::Array(Type(Inner::Box)), + Inner::Float4Array => &Kind::Array(Type(Inner::Float4)), + Inner::Float8Array => &Kind::Array(Type(Inner::Float8)), + Inner::AbstimeArray => &Kind::Array(Type(Inner::Abstime)), + Inner::ReltimeArray => &Kind::Array(Type(Inner::Reltime)), + Inner::TintervalArray => &Kind::Array(Type(Inner::Tinterval)), + Inner::PolygonArray => &Kind::Array(Type(Inner::Polygon)), + Inner::OidArray => 
&Kind::Array(Type(Inner::Oid)), + Inner::Aclitem => &Kind::Simple, + Inner::AclitemArray => &Kind::Array(Type(Inner::Aclitem)), + Inner::MacaddrArray => &Kind::Array(Type(Inner::Macaddr)), + Inner::InetArray => &Kind::Array(Type(Inner::Inet)), + Inner::Bpchar => &Kind::Simple, + Inner::Varchar => &Kind::Simple, + Inner::Date => &Kind::Simple, + Inner::Time => &Kind::Simple, + Inner::Timestamp => &Kind::Simple, + Inner::TimestampArray => &Kind::Array(Type(Inner::Timestamp)), + Inner::DateArray => &Kind::Array(Type(Inner::Date)), + Inner::TimeArray => &Kind::Array(Type(Inner::Time)), + Inner::Timestamptz => &Kind::Simple, + Inner::TimestamptzArray => &Kind::Array(Type(Inner::Timestamptz)), + Inner::Interval => &Kind::Simple, + Inner::IntervalArray => &Kind::Array(Type(Inner::Interval)), + Inner::NumericArray => &Kind::Array(Type(Inner::Numeric)), + Inner::CstringArray => &Kind::Array(Type(Inner::Cstring)), + Inner::Timetz => &Kind::Simple, + Inner::TimetzArray => &Kind::Array(Type(Inner::Timetz)), + Inner::Bit => &Kind::Simple, + Inner::BitArray => &Kind::Array(Type(Inner::Bit)), + Inner::Varbit => &Kind::Simple, + Inner::VarbitArray => &Kind::Array(Type(Inner::Varbit)), + Inner::Numeric => &Kind::Simple, + Inner::Refcursor => &Kind::Simple, + Inner::RefcursorArray => &Kind::Array(Type(Inner::Refcursor)), + Inner::Regprocedure => &Kind::Simple, + Inner::Regoper => &Kind::Simple, + Inner::Regoperator => &Kind::Simple, + Inner::Regclass => &Kind::Simple, + Inner::Regtype => &Kind::Simple, + Inner::RegprocedureArray => &Kind::Array(Type(Inner::Regprocedure)), + Inner::RegoperArray => &Kind::Array(Type(Inner::Regoper)), + Inner::RegoperatorArray => &Kind::Array(Type(Inner::Regoperator)), + Inner::RegclassArray => &Kind::Array(Type(Inner::Regclass)), + Inner::RegtypeArray => &Kind::Array(Type(Inner::Regtype)), + Inner::Record => &Kind::Pseudo, + Inner::Cstring => &Kind::Pseudo, + Inner::Any => &Kind::Pseudo, + Inner::Anyarray => &Kind::Pseudo, + Inner::Void => &Kind::Pseudo, + Inner::Trigger => &Kind::Pseudo, + Inner::LanguageHandler => &Kind::Pseudo, + Inner::Internal => &Kind::Pseudo, + Inner::Opaque => &Kind::Pseudo, + Inner::Anyelement => &Kind::Pseudo, + Inner::RecordArray => &Kind::Pseudo, + Inner::Anynonarray => &Kind::Pseudo, + Inner::TxidSnapshotArray => &Kind::Array(Type(Inner::TxidSnapshot)), + Inner::Uuid => &Kind::Simple, + Inner::UuidArray => &Kind::Array(Type(Inner::Uuid)), + Inner::TxidSnapshot => &Kind::Simple, + Inner::FdwHandler => &Kind::Pseudo, + Inner::PgLsn => &Kind::Simple, + Inner::PgLsnArray => &Kind::Array(Type(Inner::PgLsn)), + Inner::TsmHandler => &Kind::Pseudo, + Inner::PgNdistinct => &Kind::Simple, + Inner::PgDependencies => &Kind::Simple, + Inner::Anyenum => &Kind::Pseudo, + Inner::TsVector => &Kind::Simple, + Inner::Tsquery => &Kind::Simple, + Inner::GtsVector => &Kind::Simple, + Inner::TsVectorArray => &Kind::Array(Type(Inner::TsVector)), + Inner::GtsVectorArray => &Kind::Array(Type(Inner::GtsVector)), + Inner::TsqueryArray => &Kind::Array(Type(Inner::Tsquery)), + Inner::Regconfig => &Kind::Simple, + Inner::RegconfigArray => &Kind::Array(Type(Inner::Regconfig)), + Inner::Regdictionary => &Kind::Simple, + Inner::RegdictionaryArray => &Kind::Array(Type(Inner::Regdictionary)), + Inner::Jsonb => &Kind::Simple, + Inner::JsonbArray => &Kind::Array(Type(Inner::Jsonb)), + Inner::AnyRange => &Kind::Pseudo, + Inner::EventTrigger => &Kind::Pseudo, + Inner::Int4Range => &Kind::Range(Type(Inner::Int4)), + Inner::Int4RangeArray => &Kind::Array(Type(Inner::Int4Range)), + 
Inner::NumRange => &Kind::Range(Type(Inner::Numeric)), + Inner::NumRangeArray => &Kind::Array(Type(Inner::NumRange)), + Inner::TsRange => &Kind::Range(Type(Inner::Timestamp)), + Inner::TsRangeArray => &Kind::Array(Type(Inner::TsRange)), + Inner::TstzRange => &Kind::Range(Type(Inner::Timestamptz)), + Inner::TstzRangeArray => &Kind::Array(Type(Inner::TstzRange)), + Inner::DateRange => &Kind::Range(Type(Inner::Date)), + Inner::DateRangeArray => &Kind::Array(Type(Inner::DateRange)), + Inner::Int8Range => &Kind::Range(Type(Inner::Int8)), + Inner::Int8RangeArray => &Kind::Array(Type(Inner::Int8Range)), + Inner::Regnamespace => &Kind::Simple, + Inner::RegnamespaceArray => &Kind::Array(Type(Inner::Regnamespace)), + Inner::Regrole => &Kind::Simple, + Inner::RegroleArray => &Kind::Array(Type(Inner::Regrole)), Inner::Other(ref u) => &u.kind, } } @@ -1671,4 +1345,4 @@ impl Type { /// REGROLE[] pub const REGROLE_ARRAY: Type = Type(Inner::RegroleArray); -} \ No newline at end of file +} From f445f034cbe3f0386b8f0121fbb38a08a42504ec Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 9 Dec 2018 21:46:46 -0800 Subject: [PATCH 041/819] Check after restoring caches --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0f23fddce..324151f12 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -29,13 +29,13 @@ jobs: steps: - checkout - run: rustup component add rustfmt clippy - - run: cargo fmt --all -- --check - - run: cargo clippy --all - *RESTORE_REGISTRY - run: cargo generate-lockfile - *SAVE_REGISTRY - run: rustc --version > ~/rust-version - *RESTORE_DEPS + - run: cargo fmt --all -- --check + - run: cargo clippy --all - run: cargo test --all - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features - *SAVE_DEPS From c23e3e0bda801247fc17df4142c8bc2376d5d0a9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 10 Dec 2018 20:56:22 -0800 Subject: [PATCH 042/819] Merge row bits --- tokio-postgres/src/lib.rs | 53 ++---------- tokio-postgres/src/proto/mod.rs | 2 - tokio-postgres/src/proto/query.rs | 5 +- tokio-postgres/src/proto/row.rs | 65 --------------- tokio-postgres/src/row.rs | 134 ++++++++++++++++++++++++++++++ tokio-postgres/src/rows.rs | 78 ----------------- 6 files changed, 141 insertions(+), 196 deletions(-) delete mode 100644 tokio-postgres/src/proto/row.rs create mode 100644 tokio-postgres/src/row.rs delete mode 100644 tokio-postgres/src/rows.rs diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 3c8342e07..e1f86ce92 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -3,22 +3,21 @@ use bytes::{Bytes, IntoBuf}; use futures::{try_ready, Async, Future, Poll, Stream}; use std::error::Error as StdError; -use std::fmt; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio_io::{AsyncRead, AsyncWrite}; pub use crate::builder::*; pub use crate::error::*; use crate::proto::CancelFuture; -use crate::rows::RowIndex; +pub use crate::row::{Row, RowIndex}; pub use crate::stmt::Column; pub use crate::tls::*; -use crate::types::{FromSql, ToSql, Type}; +use crate::types::{ToSql, Type}; mod builder; pub mod error; mod proto; -pub mod rows; +mod row; mod stmt; mod tls; pub mod types; @@ -223,12 +222,7 @@ impl Stream for Query { type Error = Error; fn poll(&mut self) -> Poll, Error> { - match self.0.poll() { - Ok(Async::Ready(Some(row))) => Ok(Async::Ready(Some(Row(row)))), - Ok(Async::Ready(None)) => Ok(Async::Ready(None)), - Ok(Async::NotReady) 
=> Ok(Async::NotReady), - Err(e) => Err(e), - } + self.0.poll() } } @@ -256,12 +250,7 @@ impl Stream for QueryPortal { type Error = Error; fn poll(&mut self) -> Poll, Error> { - match self.0.poll() { - Ok(Async::Ready(Some(row))) => Ok(Async::Ready(Some(Row(row)))), - Ok(Async::Ready(None)) => Ok(Async::Ready(None)), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(e) => Err(e), - } + self.0.poll() } } @@ -302,38 +291,6 @@ impl Stream for CopyOut { } } -pub struct Row(proto::Row); - -impl Row { - pub fn columns(&self) -> &[Column] { - self.0.columns() - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub fn len(&self) -> usize { - self.0.len() - } - - pub fn get<'a, I, T>(&'a self, idx: I) -> T - where - I: RowIndex + fmt::Debug, - T: FromSql<'a>, - { - self.0.get(idx) - } - - pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result, Error> - where - I: RowIndex, - T: FromSql<'a>, - { - self.0.try_get(idx) - } -} - pub struct TransactionBuilder(proto::Client); impl TransactionBuilder { diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 802bd6cf8..b1c82cb33 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -30,7 +30,6 @@ mod execute; mod portal; mod prepare; mod query; -mod row; mod simple_query; mod statement; mod tls; @@ -51,7 +50,6 @@ pub use crate::proto::execute::ExecuteFuture; pub use crate::proto::portal::Portal; pub use crate::proto::prepare::PrepareFuture; pub use crate::proto::query::QueryStream; -pub use crate::proto::row::Row; pub use crate::proto::simple_query::SimpleQueryFuture; pub use crate::proto::statement::Statement; pub use crate::proto::tls::TlsFuture; diff --git a/tokio-postgres/src/proto/query.rs b/tokio-postgres/src/proto/query.rs index e648023c3..59877f061 100644 --- a/tokio-postgres/src/proto/query.rs +++ b/tokio-postgres/src/proto/query.rs @@ -5,9 +5,8 @@ use std::mem; use crate::proto::client::{Client, PendingRequest}; use crate::proto::portal::Portal; -use crate::proto::row::Row; use crate::proto::statement::Statement; -use crate::Error; +use crate::{Error, Row}; pub trait StatementHolder { fn statement(&self) -> &Statement; @@ -86,7 +85,7 @@ where } Some(Message::ErrorResponse(body)) => break Err(Error::db(body)), Some(Message::DataRow(body)) => { - let row = Row::parse(statement.statement().clone(), body)?; + let row = Row::new(statement.statement().clone(), body)?; self.0 = State::ReadingResponse { receiver, statement, diff --git a/tokio-postgres/src/proto/row.rs b/tokio-postgres/src/proto/row.rs deleted file mode 100644 index ef32b67da..000000000 --- a/tokio-postgres/src/proto/row.rs +++ /dev/null @@ -1,65 +0,0 @@ -use postgres_protocol::message::backend::DataRowBody; -use std::fmt; - -use crate::proto::statement::Statement; -use crate::rows::{RowData, RowIndex}; -use crate::types::{FromSql, WrongType}; -use crate::{Column, Error}; - -pub struct Row { - statement: Statement, - data: RowData, -} - -impl Row { - pub(crate) fn parse(statement: Statement, data: DataRowBody) -> Result { - let data = RowData::parse(data).map_err(Error::parse)?; - Ok(Row { statement, data }) - } - - pub fn columns(&self) -> &[Column] { - self.statement.columns() - } - - pub fn len(&self) -> usize { - self.columns().len() - } - - pub fn get<'b, I, T>(&'b self, idx: I) -> T - where - I: RowIndex + fmt::Debug, - T: FromSql<'b>, - { - match self.get_inner(&idx) { - Ok(Some(ok)) => ok, - Err(err) => panic!("error retrieving column {:?}: {:?}", idx, err), - Ok(None) => panic!("no such column {:?}", 
idx), - } - } - - pub fn try_get<'b, I, T>(&'b self, idx: I) -> Result, Error> - where - I: RowIndex, - T: FromSql<'b>, - { - self.get_inner(&idx) - } - - fn get_inner<'b, I, T>(&'b self, idx: &I) -> Result, Error> - where - I: RowIndex, - T: FromSql<'b>, - { - let idx = match idx.__idx(&self.columns()) { - Some(idx) => idx, - None => return Ok(None), - }; - - let ty = self.statement.columns()[idx].type_(); - if !::accepts(ty) { - return Err(Error::from_sql(Box::new(WrongType::new(ty.clone())))); - } - let value = FromSql::from_sql_nullable(ty, self.data.get(idx)); - value.map(Some).map_err(Error::from_sql) - } -} diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs new file mode 100644 index 000000000..baccc93c6 --- /dev/null +++ b/tokio-postgres/src/row.rs @@ -0,0 +1,134 @@ +use fallible_iterator::FallibleIterator; +use postgres_protocol::message::backend::DataRowBody; +use std::fmt; +use std::ops::Range; + +use crate::proto; +use crate::row::sealed::Sealed; +use crate::stmt::Column; +use crate::types::{FromSql, WrongType}; +use crate::Error; + +mod sealed { + pub trait Sealed {} +} + +/// A trait implemented by types that can index into columns of a row. +/// +/// This cannot be implemented outside of this crate. +pub trait RowIndex: Sealed { + fn __idx(&self, columns: &[Column]) -> Option; +} + +impl Sealed for usize {} + +impl RowIndex for usize { + #[inline] + fn __idx(&self, columns: &[Column]) -> Option { + if *self >= columns.len() { + None + } else { + Some(*self) + } + } +} + +impl Sealed for str {} + +impl RowIndex for str { + #[inline] + fn __idx(&self, columns: &[Column]) -> Option { + if let Some(idx) = columns.iter().position(|d| d.name() == self) { + return Some(idx); + }; + + // FIXME ASCII-only case insensitivity isn't really the right thing to + // do. Postgres itself uses a dubious wrapper around tolower and JDBC + // uses the US locale. 
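+        // Fall back to an ASCII case-insensitive comparison so that, e.g.,
+        // `row.get("ID")` still resolves a column named `id`.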
+ columns + .iter() + .position(|d| d.name().eq_ignore_ascii_case(self)) + } +} + +impl<'a, T> Sealed for &'a T where T: ?Sized + Sealed {} + +impl<'a, T> RowIndex for &'a T +where + T: ?Sized + RowIndex, +{ + #[inline] + fn __idx(&self, columns: &[Column]) -> Option { + T::__idx(*self, columns) + } +} + +pub struct Row { + statement: proto::Statement, + body: DataRowBody, + ranges: Vec>>, +} + +impl Row { + #[allow(clippy::new_ret_no_self)] + pub(crate) fn new(statement: proto::Statement, body: DataRowBody) -> Result { + let ranges = body.ranges().collect().map_err(Error::parse)?; + Ok(Row { + statement, + body, + ranges, + }) + } + + pub fn columns(&self) -> &[Column] { + self.statement.columns() + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn len(&self) -> usize { + self.columns().len() + } + + pub fn get<'a, I, T>(&'a self, idx: I) -> T + where + I: RowIndex + fmt::Display, + T: FromSql<'a>, + { + match self.get_inner(&idx) { + Ok(Some(ok)) => ok, + Err(err) => panic!("error retrieving column {}: {}", idx, err), + Ok(None) => panic!("no such column {}", idx), + } + } + + pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result, Error> + where + I: RowIndex, + T: FromSql<'a>, + { + self.get_inner(&idx) + } + + fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result, Error> + where + I: RowIndex, + T: FromSql<'a>, + { + let idx = match idx.__idx(self.columns()) { + Some(idx) => idx, + None => return Ok(None), + }; + + let ty = self.columns()[idx].type_(); + if !T::accepts(ty) { + return Err(Error::from_sql(Box::new(WrongType::new(ty.clone())))); + } + + let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); + let value = FromSql::from_sql_nullable(ty, buf); + value.map(Some).map_err(Error::from_sql) + } +} diff --git a/tokio-postgres/src/rows.rs b/tokio-postgres/src/rows.rs deleted file mode 100644 index 0360dfa55..000000000 --- a/tokio-postgres/src/rows.rs +++ /dev/null @@ -1,78 +0,0 @@ -use fallible_iterator::FallibleIterator; -use postgres_protocol::message::backend::DataRowBody; -use std::io; -use std::ops::Range; - -use crate::rows::sealed::Sealed; -use crate::stmt::Column; - -mod sealed { - use crate::stmt::Column; - - pub trait Sealed { - fn __idx(&self, stmt: &[Column]) -> Option; - } -} - -/// A trait implemented by types that can index into columns of a row. -/// -/// This cannot be implemented outside of this crate. -pub trait RowIndex: Sealed {} - -impl Sealed for usize { - #[inline] - fn __idx(&self, stmt: &[Column]) -> Option { - if *self >= stmt.len() { - None - } else { - Some(*self) - } - } -} - -impl RowIndex for usize {} - -impl Sealed for str { - #[inline] - fn __idx(&self, stmt: &[Column]) -> Option { - if let Some(idx) = stmt.iter().position(|d| d.name() == self) { - return Some(idx); - }; - - // FIXME ASCII-only case insensitivity isn't really the right thing to - // do. Postgres itself uses a dubious wrapper around tolower and JDBC - // uses the US locale. 
- stmt.iter() - .position(|d| d.name().eq_ignore_ascii_case(self)) - } -} - -impl RowIndex for str {} - -impl<'a, T> Sealed for &'a T -where - T: ?Sized + Sealed, -{ - #[inline] - fn __idx(&self, columns: &[Column]) -> Option { - T::__idx(*self, columns) - } -} - -impl<'a, T> RowIndex for &'a T where T: ?Sized + Sealed {} - -pub(crate) struct RowData { - body: DataRowBody, - ranges: Vec>>, -} - -impl RowData { - pub fn parse(body: DataRowBody) -> io::Result { - let ranges = body.ranges().collect()?; - Ok(RowData { body, ranges }) - } - - pub fn get(&self, index: usize) -> Option<&[u8]> { - self.ranges[index].clone().map(|r| &self.body.buffer()[r]) - } -} From 10a850a527d853942f0a3e5847a533dc13abde3b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 10 Dec 2018 21:15:39 -0800 Subject: [PATCH 043/819] Hide trait method --- tokio-postgres/src/row.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index baccc93c6..d63021305 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -17,6 +17,7 @@ mod sealed { /// /// This cannot be implemented outside of this crate. pub trait RowIndex: Sealed { + #[doc(hidden)] fn __idx(&self, columns: &[Column]) -> Option; } From 46f4a2911c04dc03cf974a656b2764c3aee7e02f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 13 Dec 2018 21:03:47 -0800 Subject: [PATCH 044/819] Deserialize libpq-style connection strings Just the key/value pair version for now - URLs will come later --- tokio-postgres/Cargo.toml | 3 + tokio-postgres/src/builder.rs | 182 +++++++++++++++++++++++++--- tokio-postgres/src/error/mod.rs | 6 + tokio-postgres/src/proto/connect.rs | 20 +-- tokio-postgres/tests/test.rs | 136 +++++---------------- tokio-postgres/tests/types/mod.rs | 26 ++-- 6 files changed, 226 insertions(+), 147 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 398580ff7..0b9eb9075 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -10,6 +10,9 @@ readme = "../README.md" keywords = ["database", "postgres", "postgresql", "sql", "async"] categories = ["database"] +[lib] +test = false + [package.metadata.docs.rs] features = [ "with-bit-vec-0.5", diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index de3ec2648..0bfc1a036 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -1,13 +1,15 @@ +use std::borrow::Cow; use std::collections::HashMap; +use std::iter; +use std::str::{self, FromStr}; use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::ConnectFuture; -use crate::{Connect, TlsMode}; +use crate::{Connect, Error, TlsMode}; #[derive(Clone)] pub struct Builder { params: HashMap, - password: Option, } impl Default for Builder { @@ -22,27 +24,23 @@ impl Builder { params.insert("client_encoding".to_string(), "UTF8".to_string()); params.insert("timezone".to_string(), "GMT".to_string()); - Builder { - params, - password: None, - } + Builder { params } } pub fn user(&mut self, user: &str) -> &mut Builder { self.param("user", user) } - pub fn database(&mut self, database: &str) -> &mut Builder { - self.param("database", database) + pub fn dbname(&mut self, database: &str) -> &mut Builder { + self.param("dbname", database) } - pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { - self.params.insert(key.to_string(), value.to_string()); - self + pub fn password(&mut self, password: &str) -> &mut Builder { + self.param("password", password) } - pub fn password(&mut self, password: 
&str) -> &mut Builder { - self.password = Some(password.to_string()); + pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { + self.params.insert(key.to_string(), value.to_string()); self } @@ -51,11 +49,157 @@ impl Builder { S: AsyncRead + AsyncWrite, T: TlsMode, { - Connect(ConnectFuture::new( - stream, - tls_mode, - self.password.clone(), - self.params.clone(), - )) + Connect(ConnectFuture::new(stream, tls_mode, self.params.clone())) + } +} + +impl FromStr for Builder { + type Err = Error; + + fn from_str(s: &str) -> Result { + let mut parser = Parser::new(s); + let mut builder = Builder::new(); + + while let Some((key, value)) = parser.parameter()? { + builder.param(key, &value); + } + + Ok(builder) + } +} + +struct Parser<'a> { + s: &'a str, + it: iter::Peekable>, +} + +impl<'a> Parser<'a> { + fn new(s: &'a str) -> Parser<'a> { + Parser { + s, + it: s.char_indices().peekable(), + } + } + + fn skip_ws(&mut self) { + while let Some(&(_, ' ')) = self.it.peek() { + self.it.next(); + } + } + + fn take_while(&mut self, f: F) -> &'a str + where + F: Fn(char) -> bool, + { + let start = match self.it.peek() { + Some(&(i, _)) => i, + None => return "", + }; + + loop { + match self.it.peek() { + Some(&(_, c)) if f(c) => { + self.it.next(); + } + Some(&(i, _)) => return &self.s[start..i], + None => return &self.s[start..], + } + } + } + + fn eat(&mut self, target: char) -> Result<(), Error> { + match self.it.next() { + Some((_, c)) if c == target => Ok(()), + Some((i, c)) => { + let m = format!( + "unexpected character at byte {}: expected `{}` but got `{}`", + i, target, c + ); + Err(Error::connection_syntax(m.into())) + } + None => Err(Error::connection_syntax("unexpected EOF".into())), + } + } + + fn eat_if(&mut self, target: char) -> bool { + match self.it.peek() { + Some(&(_, c)) if c == target => { + self.it.next(); + true + } + _ => false, + } + } + + fn keyword(&mut self) -> Option<&'a str> { + let s = self.take_while(|c| match c { + ' ' | '=' => false, + _ => true, + }); + + if s.is_empty() { + None + } else { + Some(s) + } + } + + fn value(&mut self) -> Result, Error> { + let raw = if self.eat_if('\'') { + let s = self.take_while(|c| c != '\''); + self.eat('\'')?; + s + } else { + let s = self.take_while(|c| c != ' '); + if s.is_empty() { + return Err(Error::connection_syntax("unexpected EOF".into())); + } + s + }; + + self.unescape_value(raw) + } + + fn unescape_value(&mut self, raw: &'a str) -> Result, Error> { + if !raw.contains('\\') { + return Ok(Cow::Borrowed(raw)); + } + + let mut s = String::with_capacity(raw.len()); + + let mut it = raw.chars(); + while let Some(c) = it.next() { + let to_push = if c == '\\' { + match it.next() { + Some('\'') => '\'', + Some('\\') => '\\', + Some(c) => { + return Err(Error::connection_syntax( + format!("invalid escape `\\{}`", c).into(), + )); + } + None => return Err(Error::connection_syntax("unexpected EOF".into())), + } + } else { + c + }; + s.push(to_push); + } + + Ok(Cow::Owned(s)) + } + + fn parameter(&mut self) -> Result)>, Error> { + self.skip_ws(); + let keyword = match self.keyword() { + Some(keyword) => keyword, + None => return Ok(None), + }; + self.skip_ws(); + self.eat('=')?; + self.skip_ws(); + let value = self.value()?; + + Ok(Some((keyword, value))) } } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 1ff20c37f..13a8149b7 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -345,6 +345,7 @@ enum Kind { MissingPassword, UnsupportedAuthentication, 
Authentication, + ConnectionSyntax, } struct ErrorInner { @@ -381,6 +382,7 @@ impl fmt::Display for Error { Kind::MissingPassword => "password not provided", Kind::UnsupportedAuthentication => "unsupported authentication method requested", Kind::Authentication => "authentication error", + Kind::ConnectionSyntax => "invalid connection string", }; fmt.write_str(s)?; if let Some(ref cause) = self.0.cause { @@ -479,4 +481,8 @@ impl Error { pub(crate) fn authentication(e: io::Error) -> Error { Error::new(Kind::Authentication, Some(Box::new(e))) } + + pub(crate) fn connection_syntax(e: Box) -> Error { + Error::new(Kind::ConnectionSyntax, Some(e)) + } } diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index 8b5fe6075..c2dde72be 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -24,7 +24,6 @@ where #[state_machine_future(start, transitions(SendingStartup))] Start { future: TlsFuture, - password: Option, params: HashMap, }, #[state_machine_future(transitions(ReadingAuth))] @@ -80,6 +79,14 @@ where let (stream, channel_binding) = try_ready!(state.future.poll()); let mut state = state.take(); + // we don't want to send the password as a param + let password = state.params.remove("password"); + + // libpq uses the parameter "dbname" but the protocol expects "database" (!?!) + if let Some(dbname) = state.params.remove("dbname") { + state.params.insert("database".to_string(), dbname); + } + let mut buf = vec![]; frontend::startup_message(state.params.iter().map(|(k, v)| (&**k, &**v)), &mut buf) .map_err(Error::encode)?; @@ -94,7 +101,7 @@ where transition!(SendingStartup { future: stream.send(buf), user, - password: state.password, + password, channel_binding, }) } @@ -317,12 +324,7 @@ where S: AsyncRead + AsyncWrite, T: TlsMode, { - pub fn new( - stream: S, - tls_mode: T, - password: Option, - params: HashMap, - ) -> ConnectFuture { - Connect::start(TlsFuture::new(stream, tls_mode), password, params) + pub fn new(stream: S, tls_mode: T, params: HashMap) -> ConnectFuture { + Connect::start(TlsFuture::new(stream, tls_mode), params) } } diff --git a/tokio-postgres/tests/test.rs b/tokio-postgres/tests/test.rs index 9df119df8..8843beabc 100644 --- a/tokio-postgres/tests/test.rs +++ b/tokio-postgres/tests/test.rs @@ -16,19 +16,19 @@ use tokio_postgres::{AsyncMessage, Client, Connection, NoTls}; mod types; fn connect( - builder: &tokio_postgres::Builder, + s: &str, ) -> impl Future), Error = tokio_postgres::Error> { - let builder = builder.clone(); + let builder = s.parse::().unwrap(); TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) .and_then(move |s| builder.connect(s, NoTls)) } -fn smoke_test(builder: &tokio_postgres::Builder) { +fn smoke_test(s: &str) { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = connect(builder); + let handshake = connect(s); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -51,11 +51,7 @@ fn plain_password_missing() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = connect( - tokio_postgres::Builder::new() - .user("pass_user") - .database("postgres"), - ); + let handshake = connect("user=pass_user dbname=postgres"); runtime.block_on(handshake).err().unwrap(); } @@ -64,12 +60,7 @@ fn plain_password_wrong() { let _ = env_logger::try_init(); let mut runtime 
= Runtime::new().unwrap(); - let handshake = connect( - tokio_postgres::Builder::new() - .user("pass_user") - .password("foo") - .database("postgres"), - ); + let handshake = connect("user=pass_user password=foo dbname=postgres"); match runtime.block_on(handshake) { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} @@ -79,12 +70,7 @@ fn plain_password_wrong() { #[test] fn plain_password_ok() { - smoke_test( - tokio_postgres::Builder::new() - .user("pass_user") - .password("password") - .database("postgres"), - ); + smoke_test("user=pass_user password=password dbname=postgres"); } #[test] @@ -92,11 +78,7 @@ fn md5_password_missing() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = connect( - tokio_postgres::Builder::new() - .user("md5_user") - .database("postgres"), - ); + let handshake = connect("user=md5_user dbname=postgres"); runtime.block_on(handshake).err().unwrap(); } @@ -105,12 +87,7 @@ fn md5_password_wrong() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = connect( - tokio_postgres::Builder::new() - .user("md5_user") - .password("foo") - .database("postgres"), - ); + let handshake = connect("user=md5_user password=foo dbname=postgres"); match runtime.block_on(handshake) { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} @@ -120,12 +97,7 @@ fn md5_password_wrong() { #[test] fn md5_password_ok() { - smoke_test( - tokio_postgres::Builder::new() - .user("md5_user") - .password("password") - .database("postgres"), - ); + smoke_test("user=md5_user password=password dbname=postgres"); } #[test] @@ -133,11 +105,7 @@ fn scram_password_missing() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = connect( - tokio_postgres::Builder::new() - .user("scram_user") - .database("postgres"), - ); + let handshake = connect("user=scram_user dbname=postgres"); runtime.block_on(handshake).err().unwrap(); } @@ -146,12 +114,7 @@ fn scram_password_wrong() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let handshake = connect( - tokio_postgres::Builder::new() - .user("scram_user") - .password("foo") - .database("postgres"), - ); + let handshake = connect("user=scram_user password=foo dbname=postgres"); match runtime.block_on(handshake) { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} @@ -161,12 +124,7 @@ fn scram_password_wrong() { #[test] fn scram_password_ok() { - smoke_test( - tokio_postgres::Builder::new() - .user("scram_user") - .password("password") - .database("postgres"), - ); + smoke_test("user=scram_user password=password dbname=postgres"); } #[test] @@ -174,9 +132,7 @@ fn pipelined_prepare() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -194,9 +150,7 @@ fn insert_select() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = 
runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -228,9 +182,7 @@ fn query_portal() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -270,9 +222,7 @@ fn cancel_query() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let cancel_data = connection.cancel_data(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -306,9 +256,7 @@ fn custom_enum() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -342,9 +290,7 @@ fn custom_domain() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -367,9 +313,7 @@ fn custom_array() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -392,9 +336,7 @@ fn custom_composite() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -431,9 +373,7 @@ fn custom_range() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -459,9 +399,7 @@ fn custom_simple() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = 
connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -478,9 +416,7 @@ fn notifications() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, mut connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, mut connection) = runtime.block_on(connect("user=postgres")).unwrap(); let (tx, rx) = mpsc::unbounded(); let connection = future::poll_fn(move || { @@ -524,9 +460,7 @@ fn transaction_commit() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -559,9 +493,7 @@ fn transaction_abort() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -596,9 +528,7 @@ fn copy_in() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -641,9 +571,7 @@ fn copy_in_error() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -682,9 +610,7 @@ fn copy_out() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -713,9 +639,7 @@ fn transaction_builder_around_moved_client() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); - let (mut client, connection) = runtime - .block_on(connect(tokio_postgres::Builder::new().user("postgres"))) - .unwrap(); + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); diff --git a/tokio-postgres/tests/types/mod.rs b/tokio-postgres/tests/types/mod.rs index 6ce526d0d..2ee54167e 100644 --- a/tokio-postgres/tests/types/mod.rs +++ b/tokio-postgres/tests/types/mod.rs @@ -32,7 +32,7 @@ where { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let 
connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -190,7 +190,7 @@ fn test_text_params() { fn test_borrowed_text() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -207,7 +207,7 @@ fn test_borrowed_text() { fn test_bpchar_params() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -240,7 +240,7 @@ fn test_bpchar_params() { fn test_citext_params() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -286,7 +286,7 @@ fn test_bytea_params() { fn test_borrowed_bytea() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -345,7 +345,7 @@ where { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -372,7 +372,7 @@ fn test_f64_nan_param() { fn test_pg_database_datname() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -388,7 +388,7 @@ fn test_pg_database_datname() { fn test_slice() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -419,7 +419,7 @@ fn test_slice() { fn test_slice_wrong_type() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -445,7 +445,7 @@ fn test_slice_wrong_type() { fn test_slice_range() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -502,7 +502,7 
@@ fn domain() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -531,7 +531,7 @@ fn domain() { fn composite() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -566,7 +566,7 @@ fn composite() { fn enum_() { let mut runtime = Runtime::new().unwrap(); - let handshake = connect(tokio_postgres::Builder::new().user("postgres")); + let handshake = connect("user=postgres"); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); From 20874e683f45b6b4c3a55c3e9d7a588ec245b6a8 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 13 Dec 2018 21:16:40 -0800 Subject: [PATCH 045/819] Fix build --- tokio-postgres-native-tls/src/test.rs | 6 +++--- tokio-postgres-openssl/src/test.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index f98b563fb..6c2a8ac64 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -44,7 +44,7 @@ fn require() { smoke_test( tokio_postgres::Builder::new() .user("ssl_user") - .database("postgres"), + .dbname("postgres"), RequireTls(TlsConnector::with_connector(connector, "localhost")), ); } @@ -60,7 +60,7 @@ fn prefer() { smoke_test( tokio_postgres::Builder::new() .user("ssl_user") - .database("postgres"), + .dbname("postgres"), PreferTls(TlsConnector::with_connector(connector, "localhost")), ); } @@ -77,7 +77,7 @@ fn scram_user() { tokio_postgres::Builder::new() .user("scram_user") .password("password") - .database("postgres"), + .dbname("postgres"), RequireTls(TlsConnector::with_connector(connector, "localhost")), ); } diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index 72d1dd785..db58b4487 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -41,7 +41,7 @@ fn require() { smoke_test( tokio_postgres::Builder::new() .user("ssl_user") - .database("postgres"), + .dbname("postgres"), RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } @@ -54,7 +54,7 @@ fn prefer() { smoke_test( tokio_postgres::Builder::new() .user("ssl_user") - .database("postgres"), + .dbname("postgres"), PreferTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } @@ -68,7 +68,7 @@ fn scram_user() { tokio_postgres::Builder::new() .user("scram_user") .password("password") - .database("postgres"), + .dbname("postgres"), RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } From 7297661cef174c2d97c76bff9aee0dbd7f9d2d7b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 16 Dec 2018 16:08:55 -0800 Subject: [PATCH 046/819] Shift tests down --- tokio-postgres/tests/{test.rs => test/main.rs} | 0 tokio-postgres/tests/{ => test}/types/bit_vec_07.rs | 0 tokio-postgres/tests/{ => test}/types/chrono_04.rs | 0 tokio-postgres/tests/{ => test}/types/eui48_04.rs | 0 tokio-postgres/tests/{ => test}/types/geo_010.rs | 
0 tokio-postgres/tests/{ => test}/types/mod.rs | 0 tokio-postgres/tests/{ => test}/types/serde_json_1.rs | 0 tokio-postgres/tests/{ => test}/types/uuid_07.rs | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename tokio-postgres/tests/{test.rs => test/main.rs} (100%) rename tokio-postgres/tests/{ => test}/types/bit_vec_07.rs (100%) rename tokio-postgres/tests/{ => test}/types/chrono_04.rs (100%) rename tokio-postgres/tests/{ => test}/types/eui48_04.rs (100%) rename tokio-postgres/tests/{ => test}/types/geo_010.rs (100%) rename tokio-postgres/tests/{ => test}/types/mod.rs (100%) rename tokio-postgres/tests/{ => test}/types/serde_json_1.rs (100%) rename tokio-postgres/tests/{ => test}/types/uuid_07.rs (100%) diff --git a/tokio-postgres/tests/test.rs b/tokio-postgres/tests/test/main.rs similarity index 100% rename from tokio-postgres/tests/test.rs rename to tokio-postgres/tests/test/main.rs diff --git a/tokio-postgres/tests/types/bit_vec_07.rs b/tokio-postgres/tests/test/types/bit_vec_07.rs similarity index 100% rename from tokio-postgres/tests/types/bit_vec_07.rs rename to tokio-postgres/tests/test/types/bit_vec_07.rs diff --git a/tokio-postgres/tests/types/chrono_04.rs b/tokio-postgres/tests/test/types/chrono_04.rs similarity index 100% rename from tokio-postgres/tests/types/chrono_04.rs rename to tokio-postgres/tests/test/types/chrono_04.rs diff --git a/tokio-postgres/tests/types/eui48_04.rs b/tokio-postgres/tests/test/types/eui48_04.rs similarity index 100% rename from tokio-postgres/tests/types/eui48_04.rs rename to tokio-postgres/tests/test/types/eui48_04.rs diff --git a/tokio-postgres/tests/types/geo_010.rs b/tokio-postgres/tests/test/types/geo_010.rs similarity index 100% rename from tokio-postgres/tests/types/geo_010.rs rename to tokio-postgres/tests/test/types/geo_010.rs diff --git a/tokio-postgres/tests/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs similarity index 100% rename from tokio-postgres/tests/types/mod.rs rename to tokio-postgres/tests/test/types/mod.rs diff --git a/tokio-postgres/tests/types/serde_json_1.rs b/tokio-postgres/tests/test/types/serde_json_1.rs similarity index 100% rename from tokio-postgres/tests/types/serde_json_1.rs rename to tokio-postgres/tests/test/types/serde_json_1.rs diff --git a/tokio-postgres/tests/types/uuid_07.rs b/tokio-postgres/tests/test/types/uuid_07.rs similarity index 100% rename from tokio-postgres/tests/types/uuid_07.rs rename to tokio-postgres/tests/test/types/uuid_07.rs From 707b87a18ea6630facbc989a15173ecb7165b37d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 16 Dec 2018 18:11:52 -0800 Subject: [PATCH 047/819] Fix parameter parsing and add test Our behavior matches libpq's - in particular it allows any escape sequence and trailing \'s... --- tokio-postgres/src/builder.rs | 113 +++++++++++++++++++---------- tokio-postgres/tests/test/main.rs | 1 + tokio-postgres/tests/test/parse.rs | 34 +++++++++ 3 files changed, 109 insertions(+), 39 deletions(-) create mode 100644 tokio-postgres/tests/test/parse.rs diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index 0bfc1a036..f2919c6b8 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -1,5 +1,4 @@ -use std::borrow::Cow; -use std::collections::HashMap; +use std::collections::hash_map::{self, HashMap}; use std::iter; use std::str::{self, FromStr}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -44,6 +43,11 @@ impl Builder { self } + /// FIXME do we want this? 
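+    /// Returns the builder's current parameters as an iterator of `(key, value)` string pairs.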
+ pub fn iter(&self) -> Iter<'_> { + Iter(self.params.iter()) + } + pub fn connect(&self, stream: S, tls_mode: T) -> Connect where S: AsyncRead + AsyncWrite, @@ -61,13 +65,30 @@ impl FromStr for Builder { let mut builder = Builder::new(); while let Some((key, value)) = parser.parameter()? { - builder.param(key, &value); + builder.params.insert(key.to_string(), value); } Ok(builder) } } +#[derive(Debug, Clone)] +pub struct Iter<'a>(hash_map::Iter<'a, String, String>); + +impl<'a> Iterator for Iter<'a> { + type Item = (&'a str, &'a str); + + fn next(&mut self) -> Option<(&'a str, &'a str)> { + self.0.next().map(|(k, v)| (&**k, &**v)) + } +} + +impl<'a> ExactSizeIterator for Iter<'a> { + fn len(&self) -> usize { + self.0.len() + } +} + struct Parser<'a> { s: &'a str, it: iter::Peekable>, @@ -82,9 +103,7 @@ impl<'a> Parser<'a> { } fn skip_ws(&mut self) { - while let Some(&(_, ' ')) = self.it.peek() { - self.it.next(); - } + self.take_while(|c| c.is_whitespace()); } fn take_while(&mut self, f: F) -> &'a str @@ -133,7 +152,8 @@ impl<'a> Parser<'a> { fn keyword(&mut self) -> Option<&'a str> { let s = self.take_while(|c| match c { - ' ' | '=' => false, + c if c.is_whitespace() => false, + '=' => false, _ => true, }); @@ -144,52 +164,67 @@ impl<'a> Parser<'a> { } } - fn value(&mut self) -> Result, Error> { - let raw = if self.eat_if('\'') { - let s = self.take_while(|c| c != '\''); + fn value(&mut self) -> Result { + let value = if self.eat_if('\'') { + let value = self.quoted_value()?; self.eat('\'')?; - s + value } else { - let s = self.take_while(|c| c != ' '); - if s.is_empty() { - return Err(Error::connection_syntax("unexpected EOF".into())); - } - s + self.simple_value()? }; - self.unescape_value(raw) + Ok(value) } - fn unescape_value(&mut self, raw: &'a str) -> Result, Error> { - if !raw.contains('\\') { - return Ok(Cow::Borrowed(raw)); + fn simple_value(&mut self) -> Result { + let mut value = String::new(); + + while let Some(&(_, c)) = self.it.peek() { + if c.is_whitespace() { + break; + } + + self.it.next(); + if c == '\\' { + if let Some((_, c2)) = self.it.next() { + value.push(c2); + } + } else { + value.push(c); + } + } + + if value.is_empty() { + return Err(Error::connection_syntax("unexpected EOF".into())); } - let mut s = String::with_capacity(raw.len()); - - let mut it = raw.chars(); - while let Some(c) = it.next() { - let to_push = if c == '\\' { - match it.next() { - Some('\'') => '\'', - Some('\\') => '\\', - Some(c) => { - return Err(Error::connection_syntax( - format!("invalid escape `\\{}`", c).into(), - )); - } - None => return Err(Error::connection_syntax("unexpected EOF".into())), + Ok(value) + } + + fn quoted_value(&mut self) -> Result { + let mut value = String::new(); + + while let Some(&(_, c)) = self.it.peek() { + if c == '\'' { + return Ok(value); + } + + self.it.next(); + if c == '\\' { + if let Some((_, c2)) = self.it.next() { + value.push(c2); } } else { - c - }; - s.push(to_push); + value.push(c); + } } - Ok(Cow::Owned(s)) + Err(Error::connection_syntax( + "unterminated quoted connection parameter value".into(), + )) } - fn parameter(&mut self) -> Result)>, Error> { + fn parameter(&mut self) -> Result, Error> { self.skip_ws(); let keyword = match self.keyword() { Some(keyword) => keyword, diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 8843beabc..15e391375 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -13,6 +13,7 @@ use tokio_postgres::error::SqlState; use 
tokio_postgres::types::{Kind, Type}; use tokio_postgres::{AsyncMessage, Client, Connection, NoTls}; +mod parse; mod types; fn connect( diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs new file mode 100644 index 000000000..236d48703 --- /dev/null +++ b/tokio-postgres/tests/test/parse.rs @@ -0,0 +1,34 @@ +use std::collections::HashMap; + +#[test] +fn pairs_ok() { + let params = r"user=foo password=' fizz \'buzz\\ ' thing = ''" + .parse::() + .unwrap(); + let params = params.iter().collect::>(); + + let mut expected = HashMap::new(); + expected.insert("user", "foo"); + expected.insert("password", r" fizz 'buzz\ "); + expected.insert("thing", ""); + expected.insert("client_encoding", "UTF8"); + expected.insert("timezone", "GMT"); + + assert_eq!(params, expected); +} + +#[test] +fn pairs_ws() { + let params = " user\t=\r\n\x0bfoo \t password = hunter2 " + .parse::() + .unwrap();; + let params = params.iter().collect::>(); + + let mut expected = HashMap::new(); + expected.insert("user", "foo"); + expected.insert("password", r"hunter2"); + expected.insert("client_encoding", "UTF8"); + expected.insert("timezone", "GMT"); + + assert_eq!(params, expected); +} From 7df7fc715bdfbe1d5d7726e730071ab4bc8523a7 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 16 Dec 2018 21:30:52 -0800 Subject: [PATCH 048/819] Start on runtime API --- .circleci/config.yml | 1 + tokio-postgres-native-tls/Cargo.toml | 2 +- tokio-postgres-native-tls/src/test.rs | 2 +- tokio-postgres-openssl/Cargo.toml | 2 +- tokio-postgres-openssl/src/test.rs | 2 +- tokio-postgres/Cargo.toml | 8 ++ tokio-postgres/src/builder.rs | 8 +- tokio-postgres/src/lib.rs | 8 +- .../src/proto/{connect.rs => handshake.rs} | 10 +- tokio-postgres/src/proto/mod.rs | 4 +- tokio-postgres/src/socket.rs | 96 +++++++++++++++++++ tokio-postgres/tests/test/main.rs | 2 +- 12 files changed, 127 insertions(+), 18 deletions(-) rename tokio-postgres/src/proto/{connect.rs => handshake.rs} (98%) create mode 100644 tokio-postgres/src/socket.rs diff --git a/.circleci/config.yml b/.circleci/config.yml index 324151f12..68edc5c18 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -37,5 +37,6 @@ jobs: - run: cargo fmt --all -- --check - run: cargo clippy --all - run: cargo test --all + - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features - *SAVE_DEPS diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml index 9ecfb2c3b..6ba2d66ef 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/tokio-postgres-native-tls/Cargo.toml @@ -9,7 +9,7 @@ futures = "0.1" native-tls = "0.2" tokio-io = "0.1" tokio-tls = "0.2" -tokio-postgres = { version = "0.3", path = "../tokio-postgres" } +tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = "0.1.7" diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index 6c2a8ac64..8e21bf0d1 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -15,7 +15,7 @@ where let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) - .and_then(|s| builder.connect(s, tls)); + .and_then(|s| builder.handshake(s, tls)); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); diff --git 
a/tokio-postgres-openssl/Cargo.toml b/tokio-postgres-openssl/Cargo.toml index 3903050e6..c875b1188 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/tokio-postgres-openssl/Cargo.toml @@ -9,7 +9,7 @@ futures = "0.1" openssl = "0.10" tokio-io = "0.1" tokio-openssl = "0.3" -tokio-postgres = { version = "0.3", path = "../tokio-postgres" } +tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = "0.1.7" diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index db58b4487..6729916b7 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -15,7 +15,7 @@ where let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) - .and_then(|s| builder.connect(s, tls)); + .and_then(|s| builder.handshake(s, tls)); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 0b9eb9075..c317c5d62 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -27,6 +27,9 @@ features = [ circle-ci = { repository = "sfackler/rust-postgres" } [features] +default = ["runtime"] +runtime = ["tokio-tcp", "tokio-uds"] + "with-bit-vec-0.5" = ["bit-vec-05"] "with-chrono-0.4" = ["chrono-04"] "with-eui48-0.4" = ["eui48-04"] @@ -48,6 +51,8 @@ tokio-codec = "0.1" tokio-io = "0.1" void = "1.0" +tokio-tcp = { version = "0.1", optional = true } + bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } @@ -56,6 +61,9 @@ serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } +[target.'cfg(unix)'.dependencies] +tokio-uds = { version = "0.2", optional = true } + [dev-dependencies] tokio = "0.1.7" env_logger = "0.5" diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index f2919c6b8..85981e9a0 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -3,8 +3,8 @@ use std::iter; use std::str::{self, FromStr}; use tokio_io::{AsyncRead, AsyncWrite}; -use crate::proto::ConnectFuture; -use crate::{Connect, Error, TlsMode}; +use crate::proto::HandshakeFuture; +use crate::{Error, Handshake, TlsMode}; #[derive(Clone)] pub struct Builder { @@ -48,12 +48,12 @@ impl Builder { Iter(self.params.iter()) } - pub fn connect(&self, stream: S, tls_mode: T) -> Connect + pub fn handshake(&self, stream: S, tls_mode: T) -> Handshake where S: AsyncRead + AsyncWrite, T: TlsMode, { - Connect(ConnectFuture::new(stream, tls_mode, self.params.clone())) + Handshake(HandshakeFuture::new(stream, tls_mode, self.params.clone())) } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index e1f86ce92..c9d77d1cc 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -10,6 +10,8 @@ pub use crate::builder::*; pub use crate::error::*; use crate::proto::CancelFuture; pub use crate::row::{Row, RowIndex}; +#[cfg(feature = "runtime")] +pub use crate::socket::Socket; pub use crate::stmt::Column; pub use crate::tls::*; use crate::types::{ToSql, Type}; @@ -18,6 +20,8 @@ mod builder; pub mod error; mod proto; mod row; +#[cfg(feature = "runtime")] +mod 
socket; mod stmt; mod tls; pub mod types; @@ -156,12 +160,12 @@ where } #[must_use = "futures do nothing unless polled"] -pub struct Connect(proto::ConnectFuture) +pub struct Handshake(proto::HandshakeFuture) where S: AsyncRead + AsyncWrite, T: TlsMode; -impl Future for Connect +impl Future for Handshake where S: AsyncRead + AsyncWrite, T: TlsMode, diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/handshake.rs similarity index 98% rename from tokio-postgres/src/proto/connect.rs rename to tokio-postgres/src/proto/handshake.rs index c2dde72be..3089ffc11 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -16,7 +16,7 @@ use crate::proto::{Client, Connection, PostgresCodec, TlsFuture}; use crate::{CancelData, ChannelBinding, Error, TlsMode}; #[derive(StateMachineFuture)] -pub enum Connect +pub enum Handshake where S: AsyncRead + AsyncWrite, T: TlsMode, @@ -70,7 +70,7 @@ where Failed(Error), } -impl PollConnect for Connect +impl PollHandshake for Handshake where S: AsyncRead + AsyncWrite, T: TlsMode, @@ -319,12 +319,12 @@ where } } -impl ConnectFuture +impl HandshakeFuture where S: AsyncRead + AsyncWrite, T: TlsMode, { - pub fn new(stream: S, tls_mode: T, params: HashMap) -> ConnectFuture { - Connect::start(TlsFuture::new(stream, tls_mode), params) + pub fn new(stream: S, tls_mode: T, params: HashMap) -> HandshakeFuture { + Handshake::start(TlsFuture::new(stream, tls_mode), params) } } diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index b1c82cb33..9d19fa0e0 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -22,11 +22,11 @@ mod bind; mod cancel; mod client; mod codec; -mod connect; mod connection; mod copy_in; mod copy_out; mod execute; +mod handshake; mod portal; mod prepare; mod query; @@ -42,11 +42,11 @@ pub use crate::proto::bind::BindFuture; pub use crate::proto::cancel::CancelFuture; pub use crate::proto::client::Client; pub use crate::proto::codec::PostgresCodec; -pub use crate::proto::connect::ConnectFuture; pub use crate::proto::connection::Connection; pub use crate::proto::copy_in::CopyInFuture; pub use crate::proto::copy_out::CopyOutStream; pub use crate::proto::execute::ExecuteFuture; +pub use crate::proto::handshake::HandshakeFuture; pub use crate::proto::portal::Portal; pub use crate::proto::prepare::PrepareFuture; pub use crate::proto::query::QueryStream; diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs new file mode 100644 index 000000000..32d30b24c --- /dev/null +++ b/tokio-postgres/src/socket.rs @@ -0,0 +1,96 @@ +use bytes::{Buf, BufMut}; +use futures::Poll; +use std::io::{self, Read, Write}; +use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_tcp::TcpStream; +#[cfg(unix)] +use tokio_uds::UnixStream; + +enum Inner { + Tcp(TcpStream), + #[cfg(unix)] + Unix(UnixStream), +} + +pub struct Socket(Inner); + +impl Socket { + pub(crate) fn new_tcp(stream: TcpStream) -> Socket { + Socket(Inner::Tcp(stream)) + } + + #[cfg(unix)] + pub(crate) fn new_unix(stream: UnixStream) -> Socket { + Socket(Inner::Unix(stream)) + } +} + +impl Read for Socket { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + match &mut self.0 { + Inner::Tcp(s) => s.read(buf), + #[cfg(unix)] + Inner::Unix(s) => s.read(buf), + } + } +} + +impl AsyncRead for Socket { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + match &self.0 { + Inner::Tcp(s) => s.prepare_uninitialized_buffer(buf), + #[cfg(unix)] + Inner::Unix(s) => 
s.prepare_uninitialized_buffer(buf), + } + } + + fn read_buf(&mut self, buf: &mut B) -> Poll + where + B: BufMut, + { + match &mut self.0 { + Inner::Tcp(s) => s.read_buf(buf), + #[cfg(unix)] + Inner::Unix(s) => s.read_buf(buf), + } + } +} + +impl Write for Socket { + fn write(&mut self, buf: &[u8]) -> io::Result { + match &mut self.0 { + Inner::Tcp(s) => s.write(buf), + #[cfg(unix)] + Inner::Unix(s) => s.write(buf), + } + } + + fn flush(&mut self) -> io::Result<()> { + match &mut self.0 { + Inner::Tcp(s) => s.flush(), + #[cfg(unix)] + Inner::Unix(s) => s.flush(), + } + } +} + +impl AsyncWrite for Socket { + fn shutdown(&mut self) -> Poll<(), io::Error> { + match &mut self.0 { + Inner::Tcp(s) => s.shutdown(), + #[cfg(unix)] + Inner::Unix(s) => s.shutdown(), + } + } + + fn write_buf(&mut self, buf: &mut B) -> Poll + where + B: Buf, + { + match &mut self.0 { + Inner::Tcp(s) => s.write_buf(buf), + #[cfg(unix)] + Inner::Unix(s) => s.write_buf(buf), + } + } +} diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 15e391375..ad1736f03 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -22,7 +22,7 @@ fn connect( let builder = s.parse::().unwrap(); TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) - .and_then(move |s| builder.connect(s, NoTls)) + .and_then(move |s| builder.handshake(s, NoTls)) } fn smoke_test(s: &str) { From 919012d0c9292369eed828af4811e7cc6274b27f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 17 Dec 2018 21:25:21 -0800 Subject: [PATCH 049/819] Finish convenience API --- tokio-postgres/Cargo.toml | 5 +- tokio-postgres/src/builder.rs | 12 ++ tokio-postgres/src/error/mod.rs | 27 ++++ tokio-postgres/src/lib.rs | 21 ++++ tokio-postgres/src/proto/connect.rs | 177 +++++++++++++++++++++++++++ tokio-postgres/src/proto/mod.rs | 4 + tokio-postgres/tests/test/main.rs | 2 + tokio-postgres/tests/test/runtime.rs | 34 +++++ 8 files changed, 280 insertions(+), 2 deletions(-) create mode 100644 tokio-postgres/src/proto/connect.rs create mode 100644 tokio-postgres/tests/test/runtime.rs diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index c317c5d62..349d4cdc2 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -28,7 +28,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio-tcp", "tokio-uds"] +runtime = ["tokio-tcp", "tokio-uds", "futures-cpupool", "lazy_static"] "with-bit-vec-0.5" = ["bit-vec-05"] "with-chrono-0.4" = ["chrono-04"] @@ -42,7 +42,6 @@ antidote = "1.0" bytes = "0.4" fallible-iterator = "0.1.6" futures = "0.1.7" -futures-cpupool = "0.1" log = "0.4" phf = "0.7.23" postgres-protocol = { version = "0.3.0", path = "../postgres-protocol" } @@ -52,6 +51,8 @@ tokio-io = "0.1" void = "1.0" tokio-tcp = { version = "0.1", optional = true } +futures-cpupool = { version = "0.1", optional = true } +lazy_static = { version = "1.0", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index 85981e9a0..3090f09ae 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -3,7 +3,11 @@ use std::iter; use std::str::{self, FromStr}; use tokio_io::{AsyncRead, AsyncWrite}; +#[cfg(feature = "runtime")] +use crate::proto::ConnectFuture; use crate::proto::HandshakeFuture; +#[cfg(feature = 
"runtime")] +use crate::{Connect, Socket}; use crate::{Error, Handshake, TlsMode}; #[derive(Clone)] @@ -55,6 +59,14 @@ impl Builder { { Handshake(HandshakeFuture::new(stream, tls_mode, self.params.clone())) } + + #[cfg(feature = "runtime")] + pub fn connect(&self, tls_mode: T) -> Connect + where + T: TlsMode, + { + Connect(ConnectFuture::new(tls_mode, self.params.clone())) + } } impl FromStr for Builder { diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 13a8149b7..d83c50dd7 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -5,6 +5,8 @@ use postgres_protocol::message::backend::{ErrorFields, ErrorResponseBody}; use std::error::{self, Error as _Error}; use std::fmt; use std::io; +#[cfg(feature = "runtime")] +use std::num::ParseIntError; pub use self::sqlstate::*; @@ -346,6 +348,11 @@ enum Kind { UnsupportedAuthentication, Authentication, ConnectionSyntax, + Connect, + #[cfg(feature = "runtime")] + MissingHost, + #[cfg(feature = "runtime")] + InvalidPort, } struct ErrorInner { @@ -383,6 +390,11 @@ impl fmt::Display for Error { Kind::UnsupportedAuthentication => "unsupported authentication method requested", Kind::Authentication => "authentication error", Kind::ConnectionSyntax => "invalid connection string", + Kind::Connect => "error connecting to server", + #[cfg(feature = "runtime")] + Kind::MissingHost => "host not provided", + #[cfg(feature = "runtime")] + Kind::InvalidPort => "invalid port", }; fmt.write_str(s)?; if let Some(ref cause) = self.0.cause { @@ -485,4 +497,19 @@ impl Error { pub(crate) fn connection_syntax(e: Box) -> Error { Error::new(Kind::ConnectionSyntax, Some(e)) } + + #[cfg(feature = "runtime")] + pub(crate) fn connect(e: io::Error) -> Error { + Error::new(Kind::Connect, Some(Box::new(e))) + } + + #[cfg(feature = "runtime")] + pub(crate) fn missing_host() -> Error { + Error::new(Kind::MissingHost, None) + } + + #[cfg(feature = "runtime")] + pub(crate) fn invalid_port(e: ParseIntError) -> Error { + Error::new(Kind::InvalidPort, Some(Box::new(e))) + } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index c9d77d1cc..d2bfec1a9 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -180,6 +180,27 @@ where } } +#[cfg(feature = "runtime")] +#[must_use = "futures do nothing unless polled"] +pub struct Connect(proto::ConnectFuture) +where + T: TlsMode; + +#[cfg(feature = "runtime")] +impl Future for Connect +where + T: TlsMode, +{ + type Item = (Client, Connection); + type Error = Error; + + fn poll(&mut self) -> Poll<(Client, Connection), Error> { + let (client, connection) = try_ready!(self.0.poll()); + + Ok(Async::Ready((Client(client), Connection(connection)))) + } +} + #[must_use = "futures do nothing unless polled"] pub struct Prepare(proto::PrepareFuture); diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs new file mode 100644 index 000000000..ad21e9dfc --- /dev/null +++ b/tokio-postgres/src/proto/connect.rs @@ -0,0 +1,177 @@ +use futures::{try_ready, Async, Future, Poll}; +use futures_cpupool::{CpuFuture, CpuPool}; +use lazy_static::lazy_static; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; +use std::collections::HashMap; +use std::io; +use std::net::{SocketAddr, ToSocketAddrs}; +#[cfg(unix)] +use std::path::Path; +use std::vec; +use tokio_tcp::TcpStream; +#[cfg(unix)] +use tokio_uds::UnixStream; + +use crate::proto::{Client, Connection, HandshakeFuture}; +use crate::{Error, Socket, TlsMode}; + 
+lazy_static! { + static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() + .name_prefix("postgres-dns-") + .pool_size(2) + .create(); +} + +#[derive(StateMachineFuture)] +pub enum Connect +where + T: TlsMode, +{ + #[state_machine_future(start)] + #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))] + #[cfg_attr(not(unix), state_machine_future(transitions(ConnectingTcp)))] + Start { + tls_mode: T, + params: HashMap, + }, + #[cfg(unix)] + #[state_machine_future(transitions(Handshaking))] + ConnectingUnix { + future: tokio_uds::ConnectFuture, + tls_mode: T, + params: HashMap, + }, + #[state_machine_future(transitions(ConnectingTcp))] + ResolvingDns { + future: CpuFuture, io::Error>, + tls_mode: T, + params: HashMap, + }, + #[state_machine_future(transitions(Handshaking))] + ConnectingTcp { + future: tokio_tcp::ConnectFuture, + addrs: vec::IntoIter, + tls_mode: T, + params: HashMap, + }, + #[state_machine_future(transitions(Finished))] + Handshaking { future: HandshakeFuture }, + #[state_machine_future(ready)] + Finished((Client, Connection)), + #[state_machine_future(error)] + Failed(Error), +} + +impl PollConnect for Connect +where + T: TlsMode, +{ + fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { + let mut state = state.take(); + + let host = match state.params.remove("host") { + Some(host) => host, + None => return Err(Error::missing_host()), + }; + + let port = match state.params.remove("port") { + Some(port) => port.parse::().map_err(Error::invalid_port)?, + None => 5432, + }; + + #[cfg(unix)] + { + if host.starts_with('/') { + let path = Path::new(&host).join(format!(".s.PGSQL.{}", port)); + transition!(ConnectingUnix { + future: UnixStream::connect(path), + tls_mode: state.tls_mode, + params: state.params, + }) + } + } + + transition!(ResolvingDns { + future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()), + tls_mode: state.tls_mode, + params: state.params, + }) + } + + #[cfg(unix)] + fn poll_connecting_unix<'a>( + state: &'a mut RentToOwn<'a, ConnectingUnix>, + ) -> Poll, Error> { + let stream = try_ready!(state.future.poll().map_err(Error::connect)); + let stream = Socket::new_unix(stream); + let state = state.take(); + + transition!(Handshaking { + future: HandshakeFuture::new(stream, state.tls_mode, state.params) + }) + } + + fn poll_resolving_dns<'a>( + state: &'a mut RentToOwn<'a, ResolvingDns>, + ) -> Poll, Error> { + let mut addrs = try_ready!(state.future.poll().map_err(Error::connect)); + let state = state.take(); + + let addr = match addrs.next() { + Some(addr) => addr, + None => { + return Err(Error::connect(io::Error::new( + io::ErrorKind::InvalidData, + "resolved 0 addresses", + ))) + } + }; + + transition!(ConnectingTcp { + future: TcpStream::connect(&addr), + addrs, + tls_mode: state.tls_mode, + params: state.params, + }) + } + + fn poll_connecting_tcp<'a>( + state: &'a mut RentToOwn<'a, ConnectingTcp>, + ) -> Poll, Error> { + let stream = loop { + match state.future.poll() { + Ok(Async::Ready(stream)) => break Socket::new_tcp(stream), + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => { + let addr = match state.addrs.next() { + Some(addr) => addr, + None => return Err(Error::connect(e)), + }; + state.future = TcpStream::connect(&addr); + } + } + }; + let state = state.take(); + + transition!(Handshaking { + future: HandshakeFuture::new(stream, state.tls_mode, state.params), + }) + } + + fn poll_handshaking<'a>( + state: &'a mut RentToOwn<'a, Handshaking>, + ) -> Poll, Error> { + 
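        // By this point the transport choice no longer matters: whether the stream came
        // from TCP or a Unix socket, HandshakeFuture resolves to the same
        // (Client, Connection) pair, which is forwarded unchanged as the finished value.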
let r = try_ready!(state.future.poll()); + + transition!(Finished(r)) + } +} + +impl ConnectFuture +where + T: TlsMode, +{ + pub fn new(tls_mode: T, params: HashMap) -> ConnectFuture { + Connect::start(tls_mode, params) + } +} diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 9d19fa0e0..3a13e6bc7 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -22,6 +22,8 @@ mod bind; mod cancel; mod client; mod codec; +#[cfg(feature = "runtime")] +mod connect; mod connection; mod copy_in; mod copy_out; @@ -42,6 +44,8 @@ pub use crate::proto::bind::BindFuture; pub use crate::proto::cancel::CancelFuture; pub use crate::proto::client::Client; pub use crate::proto::codec::PostgresCodec; +#[cfg(feature = "runtime")] +pub use crate::proto::connect::ConnectFuture; pub use crate::proto::connection::Connection; pub use crate::proto::copy_in::CopyInFuture; pub use crate::proto::copy_out::CopyOutStream; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index ad1736f03..41918e65c 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -14,6 +14,8 @@ use tokio_postgres::types::{Kind, Type}; use tokio_postgres::{AsyncMessage, Client, Connection, NoTls}; mod parse; +#[cfg(feature = "runtime")] +mod runtime; mod types; fn connect( diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs new file mode 100644 index 000000000..f723be7b2 --- /dev/null +++ b/tokio-postgres/tests/test/runtime.rs @@ -0,0 +1,34 @@ +use futures::Future; +use tokio::runtime::current_thread::Runtime; +use tokio_postgres::{Client, Connection, Error, NoTls, Socket}; + +fn connect(s: &str) -> impl Future), Error = Error> { + s.parse::().unwrap().connect(NoTls) +} + +#[test] +#[ignore] // FIXME doesn't work with our docker-based tests :( +fn unix_socket() { + let mut runtime = Runtime::new().unwrap(); + + let connect = connect("host=/var/run/postgresql port=5433 user=postgres"); + let (mut client, connection) = runtime.block_on(connect).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let execute = client.batch_execute("SELECT 1"); + runtime.block_on(execute).unwrap(); +} + +#[test] +fn tcp() { + let mut runtime = Runtime::new().unwrap(); + + let connect = connect("host=localhost port=5433 user=postgres"); + let (mut client, connection) = runtime.block_on(connect).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let execute = client.batch_execute("SELECT 1"); + runtime.block_on(execute).unwrap(); +} From 56088a9a463957434f6f3baa6573ea9d7aa12cc3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 17 Dec 2018 21:59:14 -0800 Subject: [PATCH 050/819] Fix warning --- tokio-postgres/src/error/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index d83c50dd7..3e5f992ba 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -348,6 +348,7 @@ enum Kind { UnsupportedAuthentication, Authentication, ConnectionSyntax, + #[cfg(feature = "runtime")] Connect, #[cfg(feature = "runtime")] MissingHost, @@ -390,6 +391,7 @@ impl fmt::Display for Error { Kind::UnsupportedAuthentication => "unsupported authentication method requested", Kind::Authentication => "authentication error", Kind::ConnectionSyntax => "invalid connection string", + #[cfg(feature = "runtime")] Kind::Connect => "error 
connecting to server", #[cfg(feature = "runtime")] Kind::MissingHost => "host not provided", From 7e7ae968c190361e9db229c312406152ff140006 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 18 Dec 2018 21:39:05 -0800 Subject: [PATCH 051/819] Prep for multi-host support cc #399 --- tokio-postgres-openssl/Cargo.toml | 4 + tokio-postgres-openssl/src/lib.rs | 55 +++++++ tokio-postgres-openssl/src/test.rs | 23 ++- tokio-postgres/src/builder.rs | 8 +- tokio-postgres/src/lib.rs | 4 +- tokio-postgres/src/proto/connect.rs | 153 ++++---------------- tokio-postgres/src/proto/connect_once.rs | 176 +++++++++++++++++++++++ tokio-postgres/src/proto/mod.rs | 4 + tokio-postgres/src/socket.rs | 2 + tokio-postgres/src/tls.rs | 98 +++++++++++++ 10 files changed, 398 insertions(+), 129 deletions(-) create mode 100644 tokio-postgres/src/proto/connect_once.rs diff --git a/tokio-postgres-openssl/Cargo.toml b/tokio-postgres-openssl/Cargo.toml index c875b1188..2ce03eafb 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/tokio-postgres-openssl/Cargo.toml @@ -4,6 +4,10 @@ version = "0.1.0" authors = ["Steven Fackler "] edition = "2018" +[features] +default = ["runtime"] +runtime = ["tokio-postgres/runtime"] + [dependencies] futures = "0.1" openssl = "0.10" diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index 08c482ef6..83b7c4c35 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -1,17 +1,72 @@ #![warn(rust_2018_idioms, clippy::all)] +#[cfg(feature = "runtime")] +use futures::future::{self, FutureResult}; use futures::{try_ready, Async, Future, Poll}; +#[cfg(feature = "runtime")] +use openssl::error::ErrorStack; use openssl::hash::MessageDigest; use openssl::nid::Nid; +#[cfg(feature = "runtime")] +use openssl::ssl::SslConnector; use openssl::ssl::{ConnectConfiguration, HandshakeError, SslRef}; use std::fmt::Debug; +#[cfg(feature = "runtime")] +use std::sync::Arc; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_openssl::{ConnectAsync, ConnectConfigurationExt, SslStream}; +#[cfg(feature = "runtime")] +use tokio_postgres::MakeTlsConnect; use tokio_postgres::{ChannelBinding, TlsConnect}; #[cfg(test)] mod test; +#[cfg(feature = "runtime")] +#[derive(Clone)] +pub struct MakeTlsConnector { + connector: SslConnector, + config: Arc Result<(), ErrorStack> + Sync + Send>, +} + +#[cfg(feature = "runtime")] +impl MakeTlsConnector { + pub fn new(connector: SslConnector) -> MakeTlsConnector { + MakeTlsConnector { + connector, + config: Arc::new(|_| Ok(())), + } + } + + pub fn set_callback(&mut self, f: F) + where + F: Fn(&mut ConnectConfiguration) -> Result<(), ErrorStack> + 'static + Sync + Send, + { + self.config = Arc::new(f); + } + + fn make_tls_connect_inner(&mut self, domain: &str) -> Result { + let mut ssl = self.connector.configure()?; + (self.config)(&mut ssl)?; + Ok(TlsConnector::new(ssl, domain)) + } +} + +#[cfg(feature = "runtime")] +impl MakeTlsConnect for MakeTlsConnector +where + S: AsyncRead + AsyncWrite + Debug + 'static + Sync + Send, +{ + type Stream = SslStream; + type TlsConnect = TlsConnector; + type Error = ErrorStack; + type Future = FutureResult; + + fn make_tls_connect(&mut self, domain: &str) -> FutureResult { + future::result(self.make_tls_connect_inner(domain)) + } +} + pub struct TlsConnector { ssl: ConnectConfiguration, domain: String, diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index 6729916b7..28506735a 100644 --- a/tokio-postgres-openssl/src/test.rs +++ 
b/tokio-postgres-openssl/src/test.rs @@ -4,7 +4,7 @@ use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; -use crate::TlsConnector; +use super::*; fn smoke_test(builder: &tokio_postgres::Builder, tls: T) where @@ -72,3 +72,24 @@ fn scram_user() { RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } + +#[test] +#[cfg(feature = "runtime")] +fn runtime() { + let mut runtime = Runtime::new().unwrap(); + + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + builder.set_ca_file("../test/server.crt").unwrap(); + let connector = MakeTlsConnector::new(builder.build()); + + let connect = "host=localhost port=5433 user=postgres" + .parse::() + .unwrap() + .connect(RequireTls(connector)); + let (mut client, connection) = runtime.block_on(connect).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let execute = client.batch_execute("SELECT 1"); + runtime.block_on(execute).unwrap(); +} diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index 3090f09ae..69d999bed 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -7,7 +7,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::ConnectFuture; use crate::proto::HandshakeFuture; #[cfg(feature = "runtime")] -use crate::{Connect, Socket}; +use crate::{Connect, MakeTlsMode, Socket}; use crate::{Error, Handshake, TlsMode}; #[derive(Clone)] @@ -61,11 +61,11 @@ impl Builder { } #[cfg(feature = "runtime")] - pub fn connect(&self, tls_mode: T) -> Connect + pub fn connect(&self, make_tls_mode: T) -> Connect where - T: TlsMode, + T: MakeTlsMode, { - Connect(ConnectFuture::new(tls_mode, self.params.clone())) + Connect(ConnectFuture::new(make_tls_mode, self.params.clone())) } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index d2bfec1a9..2a0010aeb 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -184,12 +184,12 @@ where #[must_use = "futures do nothing unless polled"] pub struct Connect(proto::ConnectFuture) where - T: TlsMode; + T: MakeTlsMode; #[cfg(feature = "runtime")] impl Future for Connect where - T: TlsMode, + T: MakeTlsMode, { type Item = (Client, Connection); type Error = Error; diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index ad21e9dfc..5fdd37c37 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -1,61 +1,31 @@ -use futures::{try_ready, Async, Future, Poll}; -use futures_cpupool::{CpuFuture, CpuPool}; -use lazy_static::lazy_static; +use futures::{try_ready, Future, Poll}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::collections::HashMap; -use std::io; -use std::net::{SocketAddr, ToSocketAddrs}; -#[cfg(unix)] -use std::path::Path; -use std::vec; -use tokio_tcp::TcpStream; -#[cfg(unix)] -use tokio_uds::UnixStream; -use crate::proto::{Client, Connection, HandshakeFuture}; -use crate::{Error, Socket, TlsMode}; - -lazy_static! 
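// The DNS pool and the per-socket connection states removed from this file are not
// dropped: they move wholesale into the new ConnectOnceFuture (proto/connect_once.rs,
// added below), leaving ConnectFuture to resolve the TLS mode for a host and delegate
// the actual connection attempt.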
{ - static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() - .name_prefix("postgres-dns-") - .pool_size(2) - .create(); -} +use crate::proto::{Client, ConnectOnceFuture, Connection}; +use crate::{Error, MakeTlsMode, Socket}; #[derive(StateMachineFuture)] pub enum Connect where - T: TlsMode, + T: MakeTlsMode, { - #[state_machine_future(start)] - #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))] - #[cfg_attr(not(unix), state_machine_future(transitions(ConnectingTcp)))] + #[state_machine_future(start, transitions(MakingTlsMode))] Start { - tls_mode: T, - params: HashMap, - }, - #[cfg(unix)] - #[state_machine_future(transitions(Handshaking))] - ConnectingUnix { - future: tokio_uds::ConnectFuture, - tls_mode: T, - params: HashMap, - }, - #[state_machine_future(transitions(ConnectingTcp))] - ResolvingDns { - future: CpuFuture, io::Error>, - tls_mode: T, + make_tls_mode: T, params: HashMap, }, - #[state_machine_future(transitions(Handshaking))] - ConnectingTcp { - future: tokio_tcp::ConnectFuture, - addrs: vec::IntoIter, - tls_mode: T, + #[state_machine_future(transitions(Connecting))] + MakingTlsMode { + future: T::Future, + host: String, + port: u16, params: HashMap, }, #[state_machine_future(transitions(Finished))] - Handshaking { future: HandshakeFuture }, + Connecting { + future: ConnectOnceFuture, + }, #[state_machine_future(ready)] Finished((Client, Connection)), #[state_machine_future(error)] @@ -64,7 +34,7 @@ where impl PollConnect for Connect where - T: TlsMode, + T: MakeTlsMode, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let mut state = state.take(); @@ -79,99 +49,38 @@ where None => 5432, }; - #[cfg(unix)] - { - if host.starts_with('/') { - let path = Path::new(&host).join(format!(".s.PGSQL.{}", port)); - transition!(ConnectingUnix { - future: UnixStream::connect(path), - tls_mode: state.tls_mode, - params: state.params, - }) - } - } - - transition!(ResolvingDns { - future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()), - tls_mode: state.tls_mode, + transition!(MakingTlsMode { + future: state.make_tls_mode.make_tls_mode(&host), + host, + port, params: state.params, }) } - #[cfg(unix)] - fn poll_connecting_unix<'a>( - state: &'a mut RentToOwn<'a, ConnectingUnix>, - ) -> Poll, Error> { - let stream = try_ready!(state.future.poll().map_err(Error::connect)); - let stream = Socket::new_unix(stream); + fn poll_making_tls_mode<'a>( + state: &'a mut RentToOwn<'a, MakingTlsMode>, + ) -> Poll, Error> { + let tls_mode = try_ready!(state.future.poll().map_err(|e| Error::tls(e.into()))); let state = state.take(); - transition!(Handshaking { - future: HandshakeFuture::new(stream, state.tls_mode, state.params) + transition!(Connecting { + future: ConnectOnceFuture::new(state.host, state.port, tls_mode, state.params), }) } - fn poll_resolving_dns<'a>( - state: &'a mut RentToOwn<'a, ResolvingDns>, - ) -> Poll, Error> { - let mut addrs = try_ready!(state.future.poll().map_err(Error::connect)); - let state = state.take(); - - let addr = match addrs.next() { - Some(addr) => addr, - None => { - return Err(Error::connect(io::Error::new( - io::ErrorKind::InvalidData, - "resolved 0 addresses", - ))) - } - }; - - transition!(ConnectingTcp { - future: TcpStream::connect(&addr), - addrs, - tls_mode: state.tls_mode, - params: state.params, - }) - } - - fn poll_connecting_tcp<'a>( - state: &'a mut RentToOwn<'a, ConnectingTcp>, - ) -> Poll, Error> { - let stream = loop { - match state.future.poll() { - Ok(Async::Ready(stream)) 
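                    // The loop being removed here tries each address returned by DNS in
                    // turn and only fails once every candidate is exhausted; it reappears
                    // unchanged in connect_once.rs rather than being deleted.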
=> break Socket::new_tcp(stream), - Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(e) => { - let addr = match state.addrs.next() { - Some(addr) => addr, - None => return Err(Error::connect(e)), - }; - state.future = TcpStream::connect(&addr); - } - } - }; - let state = state.take(); - - transition!(Handshaking { - future: HandshakeFuture::new(stream, state.tls_mode, state.params), - }) - } - - fn poll_handshaking<'a>( - state: &'a mut RentToOwn<'a, Handshaking>, - ) -> Poll, Error> { + fn poll_connecting<'a>( + state: &'a mut RentToOwn<'a, Connecting>, + ) -> Poll, Error> { let r = try_ready!(state.future.poll()); - transition!(Finished(r)) } } impl ConnectFuture where - T: TlsMode, + T: MakeTlsMode, { - pub fn new(tls_mode: T, params: HashMap) -> ConnectFuture { - Connect::start(tls_mode, params) + pub fn new(make_tls_mode: T, params: HashMap) -> ConnectFuture { + Connect::start(make_tls_mode, params) } } diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs new file mode 100644 index 000000000..929428699 --- /dev/null +++ b/tokio-postgres/src/proto/connect_once.rs @@ -0,0 +1,176 @@ +use futures::{try_ready, Async, Future, Poll}; +use futures_cpupool::{CpuFuture, CpuPool}; +use lazy_static::lazy_static; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; +use std::collections::HashMap; +use std::io; +use std::net::{SocketAddr, ToSocketAddrs}; +#[cfg(unix)] +use std::path::Path; +use std::vec; +use tokio_tcp::TcpStream; +#[cfg(unix)] +use tokio_uds::UnixStream; + +use crate::proto::{Client, Connection, HandshakeFuture}; +use crate::{Error, Socket, TlsMode}; + +lazy_static! { + static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() + .name_prefix("postgres-dns-") + .pool_size(2) + .create(); +} + +#[derive(StateMachineFuture)] +pub enum ConnectOnce +where + T: TlsMode, +{ + #[state_machine_future(start)] + #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))] + #[cfg_attr(not(unix), state_machine_future(transitions(ConnectingTcp)))] + Start { + host: String, + port: u16, + tls_mode: T, + params: HashMap, + }, + #[cfg(unix)] + #[state_machine_future(transitions(Handshaking))] + ConnectingUnix { + future: tokio_uds::ConnectFuture, + tls_mode: T, + params: HashMap, + }, + #[state_machine_future(transitions(ConnectingTcp))] + ResolvingDns { + future: CpuFuture, io::Error>, + tls_mode: T, + params: HashMap, + }, + #[state_machine_future(transitions(Handshaking))] + ConnectingTcp { + future: tokio_tcp::ConnectFuture, + addrs: vec::IntoIter, + tls_mode: T, + params: HashMap, + }, + #[state_machine_future(transitions(Finished))] + Handshaking { future: HandshakeFuture }, + #[state_machine_future(ready)] + Finished((Client, Connection)), + #[state_machine_future(error)] + Failed(Error), +} + +impl PollConnectOnce for ConnectOnce +where + T: TlsMode, +{ + fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { + let state = state.take(); + + #[cfg(unix)] + { + if state.host.starts_with('/') { + let path = Path::new(&state.host).join(format!(".s.PGSQL.{}", state.port)); + transition!(ConnectingUnix { + future: UnixStream::connect(path), + tls_mode: state.tls_mode, + params: state.params, + }) + } + } + + let host = state.host; + let port = state.port; + transition!(ResolvingDns { + future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()), + tls_mode: state.tls_mode, + params: state.params, + }) + } + + #[cfg(unix)] + fn poll_connecting_unix<'a>( + state: &'a mut 
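        // Connections reach this state when poll_start saw a host beginning with '/',
        // which is treated as a Unix socket directory and joined with the conventional
        // .s.PGSQL.<port> file name.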
RentToOwn<'a, ConnectingUnix>, + ) -> Poll, Error> { + let stream = try_ready!(state.future.poll().map_err(Error::connect)); + let stream = Socket::new_unix(stream); + let state = state.take(); + + transition!(Handshaking { + future: HandshakeFuture::new(stream, state.tls_mode, state.params) + }) + } + + fn poll_resolving_dns<'a>( + state: &'a mut RentToOwn<'a, ResolvingDns>, + ) -> Poll, Error> { + let mut addrs = try_ready!(state.future.poll().map_err(Error::connect)); + let state = state.take(); + + let addr = match addrs.next() { + Some(addr) => addr, + None => { + return Err(Error::connect(io::Error::new( + io::ErrorKind::InvalidData, + "resolved 0 addresses", + ))) + } + }; + + transition!(ConnectingTcp { + future: TcpStream::connect(&addr), + addrs, + tls_mode: state.tls_mode, + params: state.params, + }) + } + + fn poll_connecting_tcp<'a>( + state: &'a mut RentToOwn<'a, ConnectingTcp>, + ) -> Poll, Error> { + let stream = loop { + match state.future.poll() { + Ok(Async::Ready(stream)) => break Socket::new_tcp(stream), + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => { + let addr = match state.addrs.next() { + Some(addr) => addr, + None => return Err(Error::connect(e)), + }; + state.future = TcpStream::connect(&addr); + } + } + }; + let state = state.take(); + + transition!(Handshaking { + future: HandshakeFuture::new(stream, state.tls_mode, state.params), + }) + } + + fn poll_handshaking<'a>( + state: &'a mut RentToOwn<'a, Handshaking>, + ) -> Poll, Error> { + let r = try_ready!(state.future.poll()); + + transition!(Finished(r)) + } +} + +impl ConnectOnceFuture +where + T: TlsMode, +{ + pub fn new( + host: String, + port: u16, + tls_mode: T, + params: HashMap, + ) -> ConnectOnceFuture { + ConnectOnce::start(host, port, tls_mode, params) + } +} diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 3a13e6bc7..de81620e4 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -24,6 +24,8 @@ mod client; mod codec; #[cfg(feature = "runtime")] mod connect; +#[cfg(feature = "runtime")] +mod connect_once; mod connection; mod copy_in; mod copy_out; @@ -46,6 +48,8 @@ pub use crate::proto::client::Client; pub use crate::proto::codec::PostgresCodec; #[cfg(feature = "runtime")] pub use crate::proto::connect::ConnectFuture; +#[cfg(feature = "runtime")] +pub use crate::proto::connect_once::ConnectOnceFuture; pub use crate::proto::connection::Connection; pub use crate::proto::copy_in::CopyInFuture; pub use crate::proto::copy_out::CopyOutStream; diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs index 32d30b24c..641562df5 100644 --- a/tokio-postgres/src/socket.rs +++ b/tokio-postgres/src/socket.rs @@ -6,12 +6,14 @@ use tokio_tcp::TcpStream; #[cfg(unix)] use tokio_uds::UnixStream; +#[derive(Debug)] enum Inner { Tcp(TcpStream), #[cfg(unix)] Unix(UnixStream), } +#[derive(Debug)] pub struct Socket(Inner); impl Socket { diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 50dc8e26c..c6a853b49 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -25,6 +25,16 @@ impl ChannelBinding { } } +#[cfg(feature = "runtime")] +pub trait MakeTlsMode { + type Stream: AsyncRead + AsyncWrite; + type TlsMode: TlsMode; + type Error: Into>; + type Future: Future; + + fn make_tls_mode(&mut self, domain: &str) -> Self::Future; +} + pub trait TlsMode { type Stream: AsyncRead + AsyncWrite; type Error: Into>; @@ -35,6 +45,16 @@ pub trait TlsMode { fn handle_tls(self, use_tls: bool, stream: 
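    // handle_tls is told whether the server accepted a TLS handshake. The three modes
    // differ only in how they react: NoTls never requests TLS, PreferTls falls back to
    // plaintext if the server declines, and RequireTls treats a refusal as an error.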
S) -> Self::Future; } +#[cfg(feature = "runtime")] +pub trait MakeTlsConnect { + type Stream: AsyncRead + AsyncWrite; + type TlsConnect: TlsConnect; + type Error: Into>; + type Future: Future; + + fn make_tls_connect(&mut self, domain: &str) -> Self::Future; +} + pub trait TlsConnect { type Stream: AsyncRead + AsyncWrite; type Error: Into>; @@ -46,6 +66,21 @@ pub trait TlsConnect { #[derive(Debug, Copy, Clone)] pub struct NoTls; +#[cfg(feature = "runtime")] +impl MakeTlsMode for NoTls +where + S: AsyncRead + AsyncWrite, +{ + type Stream = S; + type TlsMode = NoTls; + type Error = Void; + type Future = FutureResult; + + fn make_tls_mode(&mut self, _: &str) -> FutureResult { + future::ok(NoTls) + } +} + impl TlsMode for NoTls where S: AsyncRead + AsyncWrite, @@ -68,6 +103,38 @@ where #[derive(Debug, Copy, Clone)] pub struct PreferTls(pub T); +#[cfg(feature = "runtime")] +impl MakeTlsMode for PreferTls +where + T: MakeTlsConnect, + S: AsyncRead + AsyncWrite, +{ + type Stream = MaybeTlsStream; + type TlsMode = PreferTls; + type Error = T::Error; + type Future = MakePreferTlsFuture; + + fn make_tls_mode(&mut self, domain: &str) -> MakePreferTlsFuture { + MakePreferTlsFuture(self.0.make_tls_connect(domain)) + } +} + +#[cfg(feature = "runtime")] +pub struct MakePreferTlsFuture(F); + +#[cfg(feature = "runtime")] +impl Future for MakePreferTlsFuture +where + F: Future, +{ + type Item = PreferTls; + type Error = F::Error; + + fn poll(&mut self) -> Poll, F::Error> { + self.0.poll().map(|f| f.map(PreferTls)) + } +} + impl TlsMode for PreferTls where T: TlsConnect, @@ -207,6 +274,37 @@ where #[derive(Debug, Copy, Clone)] pub struct RequireTls(pub T); +#[cfg(feature = "runtime")] +impl MakeTlsMode for RequireTls +where + T: MakeTlsConnect, +{ + type Stream = T::Stream; + type TlsMode = RequireTls; + type Error = T::Error; + type Future = MakeRequireTlsFuture; + + fn make_tls_mode(&mut self, domain: &str) -> MakeRequireTlsFuture { + MakeRequireTlsFuture(self.0.make_tls_connect(domain)) + } +} + +#[cfg(feature = "runtime")] +pub struct MakeRequireTlsFuture(F); + +#[cfg(feature = "runtime")] +impl Future for MakeRequireTlsFuture +where + F: Future, +{ + type Item = RequireTls; + type Error = F::Error; + + fn poll(&mut self) -> Poll, F::Error> { + self.0.poll().map(|f| f.map(RequireTls)) + } +} + impl TlsMode for RequireTls where T: TlsConnect, From 23b0d6e6f30548c4a57cfedf56d179b0626011eb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 19 Dec 2018 20:18:48 -0800 Subject: [PATCH 052/819] Support multiple hosts when connecting cc #399 --- tokio-postgres/src/error/mod.rs | 9 ++++ tokio-postgres/src/proto/connect.rs | 71 ++++++++++++++++++++++++---- tokio-postgres/tests/test/runtime.rs | 39 ++++++++++----- 3 files changed, 98 insertions(+), 21 deletions(-) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 3e5f992ba..6a8d5d3df 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -354,6 +354,8 @@ enum Kind { MissingHost, #[cfg(feature = "runtime")] InvalidPort, + #[cfg(feature = "runtime")] + InvalidPortCount, } struct ErrorInner { @@ -397,6 +399,8 @@ impl fmt::Display for Error { Kind::MissingHost => "host not provided", #[cfg(feature = "runtime")] Kind::InvalidPort => "invalid port", + #[cfg(feature = "runtime")] + Kind::InvalidPortCount => "wrong number of ports provided", }; fmt.write_str(s)?; if let Some(ref cause) = self.0.cause { @@ -514,4 +518,9 @@ impl Error { pub(crate) fn invalid_port(e: ParseIntError) -> Error { 
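    // With multi-host support, "host" and "port" become comma-separated lists, e.g.
    // host=foobar.invalid,localhost port=5432,5433; a single port entry is reused for
    // every host and an empty entry defaults to 5432. A port that fails to parse as a
    // u16 maps to this InvalidPort error, and mismatched list lengths map to the new
    // InvalidPortCount error.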
Error::new(Kind::InvalidPort, Some(Box::new(e))) } + + #[cfg(feature = "runtime")] + pub(crate) fn invalid_port_count() -> Error { + Error::new(Kind::InvalidPortCount, None) + } } diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index 5fdd37c37..167a6a429 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -1,6 +1,7 @@ -use futures::{try_ready, Future, Poll}; +use futures::{try_ready, Async, Future, Poll}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::collections::HashMap; +use std::vec; use crate::proto::{Client, ConnectOnceFuture, Connection}; use crate::{Error, MakeTlsMode, Socket}; @@ -20,11 +21,16 @@ where future: T::Future, host: String, port: u16, + addrs: vec::IntoIter<(String, u16)>, + make_tls_mode: T, params: HashMap, }, - #[state_machine_future(transitions(Finished))] + #[state_machine_future(transitions(MakingTlsMode, Finished))] Connecting { future: ConnectOnceFuture, + addrs: vec::IntoIter<(String, u16)>, + make_tls_mode: T, + params: HashMap, }, #[state_machine_future(ready)] Finished((Client, Connection)), @@ -43,16 +49,42 @@ where Some(host) => host, None => return Err(Error::missing_host()), }; + let mut addrs = host + .split(',') + .map(|s| (s.to_string(), 0u16)) + .collect::>(); - let port = match state.params.remove("port") { - Some(port) => port.parse::().map_err(Error::invalid_port)?, - None => 5432, - }; + let port = state.params.remove("port").unwrap_or_else(String::new); + let mut ports = port + .split(',') + .map(|s| { + if s.is_empty() { + Ok(5432) + } else { + s.parse::().map_err(Error::invalid_port) + } + }) + .collect::, _>>()?; + if ports.len() == 1 { + ports.resize(addrs.len(), ports[0]); + } + if addrs.len() != ports.len() { + return Err(Error::invalid_port_count()); + } + + for (addr, port) in addrs.iter_mut().zip(ports) { + addr.1 = port; + } + + let mut addrs = addrs.into_iter(); + let (host, port) = addrs.next().expect("addrs cannot be empty"); transition!(MakingTlsMode { future: state.make_tls_mode.make_tls_mode(&host), host, port, + addrs, + make_tls_mode: state.make_tls_mode, params: state.params, }) } @@ -64,15 +96,36 @@ where let state = state.take(); transition!(Connecting { - future: ConnectOnceFuture::new(state.host, state.port, tls_mode, state.params), + future: ConnectOnceFuture::new(state.host, state.port, tls_mode, state.params.clone()), + addrs: state.addrs, + make_tls_mode: state.make_tls_mode, + params: state.params, }) } fn poll_connecting<'a>( state: &'a mut RentToOwn<'a, Connecting>, ) -> Poll, Error> { - let r = try_ready!(state.future.poll()); - transition!(Finished(r)) + match state.future.poll() { + Ok(Async::Ready(r)) => transition!(Finished(r)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(e) => { + let mut state = state.take(); + let (host, port) = match state.addrs.next() { + Some(addr) => addr, + None => return Err(e), + }; + + transition!(MakingTlsMode { + future: state.make_tls_mode.make_tls_mode(&host), + host, + port, + addrs: state.addrs, + make_tls_mode: state.make_tls_mode, + params: state.params, + }) + } + } } } diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index f723be7b2..576ca02ff 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -6,12 +6,9 @@ fn connect(s: &str) -> impl Future), Error = s.parse::().unwrap().connect(NoTls) } -#[test] -#[ignore] // FIXME doesn't work with our docker-based tests :( -fn 
unix_socket() { +fn smoke_test(s: &str) { let mut runtime = Runtime::new().unwrap(); - - let connect = connect("host=/var/run/postgresql port=5433 user=postgres"); + let connect = connect(s); let (mut client, connection) = runtime.block_on(connect).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -20,15 +17,33 @@ fn unix_socket() { runtime.block_on(execute).unwrap(); } +#[test] +#[ignore] // FIXME doesn't work with our docker-based tests :( +fn unix_socket() { + smoke_test("host=/var/run/postgresql port=5433 user=postgres"); +} + #[test] fn tcp() { - let mut runtime = Runtime::new().unwrap(); + smoke_test("host=localhost port=5433 user=postgres") +} - let connect = connect("host=localhost port=5433 user=postgres"); - let (mut client, connection) = runtime.block_on(connect).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); +#[test] +fn multiple_hosts_one_port() { + smoke_test("host=foobar.invalid,localhost port=5433 user=postgres"); +} - let execute = client.batch_execute("SELECT 1"); - runtime.block_on(execute).unwrap(); +#[test] +fn multiple_hosts_multiple_ports() { + smoke_test("host=foobar.invalid,localhost port=5432,5433 user=postgres"); +} + +#[test] +fn wrong_port_count() { + let mut runtime = Runtime::new().unwrap(); + let f = connect("host=localhost port=5433,5433 user=postgres"); + runtime.block_on(f).err().unwrap(); + + let f = connect("host=localhost,localhost,localhost port=5433,5433 user=postgres"); + runtime.block_on(f).err().unwrap(); } From 7d20064bd06a878e50f4e2a23461d55a5d537c4d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 20 Dec 2018 20:36:14 -0800 Subject: [PATCH 053/819] Rename into_cause to into_source --- tokio-postgres/src/error/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 6a8d5d3df..bc3913a46 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -418,7 +418,7 @@ impl error::Error for Error { impl Error { /// Consumes the error, returning its cause. 
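// Note that only the method name changes in this commit (into_cause becomes
// into_source, matching the std::error::Error::source terminology); the doc comment
// above still says "cause" and the body is untouched.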
- pub fn into_cause(self) -> Option> { + pub fn into_source(self) -> Option> { self.0.cause } From 759256010d1a9c2eafbad1043126ada939a17cab Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 21 Dec 2018 13:34:09 -0800 Subject: [PATCH 054/819] Initial sync crate sketch --- Cargo.toml | 1 + postgres/Cargo.toml | 18 ++++++++++++ postgres/src/builder.rs | 50 ++++++++++++++++++++++++++++++++ postgres/src/client.rs | 60 +++++++++++++++++++++++++++++++++++++++ postgres/src/lib.rs | 22 ++++++++++++++ postgres/src/statement.rs | 14 +++++++++ 6 files changed, 165 insertions(+) create mode 100644 postgres/Cargo.toml create mode 100644 postgres/src/builder.rs create mode 100644 postgres/src/client.rs create mode 100644 postgres/src/lib.rs create mode 100644 postgres/src/statement.rs diff --git a/Cargo.toml b/Cargo.toml index 86e7becf2..40e30b1e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] members = [ "codegen", + "postgres", "postgres-protocol", "tokio-postgres", "tokio-postgres-native-tls", diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml new file mode 100644 index 000000000..03176d028 --- /dev/null +++ b/postgres/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "postgres" +version = "0.1.0" +authors = ["Steven Fackler "] +edition = "2018" + +[features] +default = ["runtime"] + +runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] + +[dependencies] +futures = "0.1" +tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } + +tokio = { version = "0.1", optional = true } +lazy_static = { version = "1.0", optional = true } +log = { version = "0.4", optional = true } diff --git a/postgres/src/builder.rs b/postgres/src/builder.rs new file mode 100644 index 000000000..43f35e6dd --- /dev/null +++ b/postgres/src/builder.rs @@ -0,0 +1,50 @@ +use futures::sync::oneshot; +use futures::Future; +use log::error; +use std::str::FromStr; +use tokio_postgres::{Error, MakeTlsMode, Socket, TlsMode}; + +use crate::{Client, RUNTIME}; + +pub struct Builder(tokio_postgres::Builder); + +impl Default for Builder { + fn default() -> Builder { + Builder(tokio_postgres::Builder::default()) + } +} + +impl Builder { + pub fn new() -> Builder { + Builder(tokio_postgres::Builder::new()) + } + + pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { + self.0.param(key, value); + self + } + + pub fn connect(&self, tls_mode: T) -> Result + where + T: MakeTlsMode + 'static + Send, + T::TlsMode: Send, + T::Stream: Send, + T::Future: Send, + >::Future: Send, + { + let connect = self.0.connect(tls_mode); + let (client, connection) = oneshot::spawn(connect, &RUNTIME.executor()).wait()?; + let connection = connection.map_err(|e| error!("postgres connection error: {}", e)); + RUNTIME.executor().spawn(connection); + + Ok(Client::from(client)) + } +} + +impl FromStr for Builder { + type Err = Error; + + fn from_str(s: &str) -> Result { + s.parse().map(Builder) + } +} diff --git a/postgres/src/client.rs b/postgres/src/client.rs new file mode 100644 index 000000000..2dce6d941 --- /dev/null +++ b/postgres/src/client.rs @@ -0,0 +1,60 @@ +use futures::{Future, Stream}; +use tokio_postgres::types::{ToSql, Type}; +use tokio_postgres::{Error, Row}; +#[cfg(feature = "runtime")] +use tokio_postgres::{MakeTlsMode, Socket, TlsMode}; + +#[cfg(feature = "runtime")] +use crate::Builder; +use crate::Statement; + +pub struct Client(tokio_postgres::Client); + +impl Client { + #[cfg(feature = "runtime")] + pub fn connect(params: &str, tls_mode: T) -> Result + where + 
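        // The 'static + Send bounds exist because the synchronous API does not drive
        // futures on the calling thread: the connect future is handed to the shared
        // background tokio Runtime (RUNTIME in lib.rs) and the Connection is spawned
        // onto it, while the caller simply blocks on the result.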
T: MakeTlsMode + 'static + Send, + T::TlsMode: Send, + T::Stream: Send, + T::Future: Send, + >::Future: Send, + { + params.parse::()?.connect(tls_mode) + } + + #[cfg(feature = "runtime")] + pub fn builder() -> Builder { + Builder::new() + } + + pub fn prepare(&mut self, query: &str) -> Result { + self.0.prepare(query).wait().map(Statement) + } + + pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { + self.0.prepare_typed(query, types).wait().map(Statement) + } + + pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Result { + self.0.execute(&statement.0, params).wait() + } + + pub fn query( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + ) -> Result, Error> { + self.0.query(&statement.0, params).collect().wait() + } + + pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { + self.0.batch_execute(query).wait() + } +} + +impl From for Client { + fn from(c: tokio_postgres::Client) -> Client { + Client(c) + } +} diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs new file mode 100644 index 000000000..1072ea7cd --- /dev/null +++ b/postgres/src/lib.rs @@ -0,0 +1,22 @@ +#[cfg(feature = "runtime")] +use lazy_static::lazy_static; +#[cfg(feature = "runtime")] +use tokio::runtime::{self, Runtime}; + +#[cfg(feature = "runtime")] +mod builder; +mod client; +mod statement; + +#[cfg(feature = "runtime")] +pub use crate::builder::*; +pub use crate::client::*; +pub use crate::statement::*; + +#[cfg(feature = "runtime")] +lazy_static! { + static ref RUNTIME: Runtime = runtime::Builder::new() + .name_prefix("postgres-") + .build() + .unwrap(); +} diff --git a/postgres/src/statement.rs b/postgres/src/statement.rs new file mode 100644 index 000000000..bcb746149 --- /dev/null +++ b/postgres/src/statement.rs @@ -0,0 +1,14 @@ +use tokio_postgres::types::Type; +use tokio_postgres::Column; + +pub struct Statement(pub(crate) tokio_postgres::Statement); + +impl Statement { + pub fn params(&self) -> &[Type] { + self.0.params() + } + + pub fn columns(&self) -> &[Column] { + self.0.columns() + } +} From 44fa44a30791988adbb49e1cd05663aa9d2aaebf Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 21 Dec 2018 13:46:50 -0800 Subject: [PATCH 055/819] Sync transactions --- postgres/src/client.rs | 7 +++- postgres/src/lib.rs | 2 ++ postgres/src/transaction.rs | 64 +++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 postgres/src/transaction.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 2dce6d941..ffa3169e2 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -6,7 +6,7 @@ use tokio_postgres::{MakeTlsMode, Socket, TlsMode}; #[cfg(feature = "runtime")] use crate::Builder; -use crate::Statement; +use crate::{Statement, Transaction}; pub struct Client(tokio_postgres::Client); @@ -51,6 +51,11 @@ impl Client { pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { self.0.batch_execute(query).wait() } + + pub fn transaction(&mut self) -> Result, Error> { + self.batch_execute("BEGIN")?; + Ok(Transaction::new(self)) + } } impl From for Client { diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 1072ea7cd..acd88f4d9 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -7,11 +7,13 @@ use tokio::runtime::{self, Runtime}; mod builder; mod client; mod statement; +mod transaction; #[cfg(feature = "runtime")] pub use crate::builder::*; pub use crate::client::*; pub use crate::statement::*; +pub use crate::transaction::*; #[cfg(feature = 
"runtime")] lazy_static! { diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs new file mode 100644 index 000000000..56d2dcd35 --- /dev/null +++ b/postgres/src/transaction.rs @@ -0,0 +1,64 @@ +use tokio_postgres::types::{ToSql, Type}; +use tokio_postgres::{Error, Row}; + +use crate::{Client, Statement}; + +pub struct Transaction<'a> { + client: &'a mut Client, + done: bool, +} + +impl<'a> Drop for Transaction<'a> { + fn drop(&mut self) { + if !self.done { + let _ = self.rollback_inner(); + } + } +} + +impl<'a> Transaction<'a> { + pub(crate) fn new(client: &'a mut Client) -> Transaction<'a> { + Transaction { + client, + done: false, + } + } + + pub fn commit(mut self) -> Result<(), Error> { + self.done = true; + self.client.batch_execute("COMMIT") + } + + pub fn rollback(mut self) -> Result<(), Error> { + self.done = true; + self.rollback_inner() + } + + fn rollback_inner(&mut self) -> Result<(), Error> { + self.client.batch_execute("ROLLBACK") + } + + pub fn prepare(&mut self, query: &str) -> Result { + self.client.prepare(query) + } + + pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { + self.client.prepare_typed(query, types) + } + + pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Result { + self.client.execute(statement, params) + } + + pub fn query( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + ) -> Result, Error> { + self.client.query(statement, params) + } + + pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { + self.client.batch_execute(query) + } +} From 26eb27e70d125d8c8b52e1c4e86cb8d0413e1413 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 21 Dec 2018 20:13:15 -0800 Subject: [PATCH 056/819] Support one-off queries in sync API We can't do this in tokio-postgres while borrowing the parameters, but it's fine in the sync API! 
--- postgres/src/client.rs | 18 +++++++++++------- postgres/src/lib.rs | 2 ++ postgres/src/query.rs | 28 ++++++++++++++++++++++++++++ postgres/src/statement.rs | 1 + postgres/src/transaction.rs | 20 +++++++++++--------- tokio-postgres/src/lib.rs | 1 + 6 files changed, 54 insertions(+), 16 deletions(-) create mode 100644 postgres/src/query.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index ffa3169e2..2553c249d 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -6,7 +6,7 @@ use tokio_postgres::{MakeTlsMode, Socket, TlsMode}; #[cfg(feature = "runtime")] use crate::Builder; -use crate::{Statement, Transaction}; +use crate::{Query, Statement, Transaction}; pub struct Client(tokio_postgres::Client); @@ -36,15 +36,19 @@ impl Client { self.0.prepare_typed(query, types).wait().map(Statement) } - pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Result { + pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result + where + T: Query, + { + let statement = query.__statement(self)?; self.0.execute(&statement.0, params).wait() } - pub fn query( - &mut self, - statement: &Statement, - params: &[&dyn ToSql], - ) -> Result, Error> { + pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + where + T: Query, + { + let statement = query.__statement(self)?; self.0.query(&statement.0, params).collect().wait() } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index acd88f4d9..60e249a31 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -6,12 +6,14 @@ use tokio::runtime::{self, Runtime}; #[cfg(feature = "runtime")] mod builder; mod client; +mod query; mod statement; mod transaction; #[cfg(feature = "runtime")] pub use crate::builder::*; pub use crate::client::*; +pub use crate::query::*; pub use crate::statement::*; pub use crate::transaction::*; diff --git a/postgres/src/query.rs b/postgres/src/query.rs new file mode 100644 index 000000000..4910fcd0d --- /dev/null +++ b/postgres/src/query.rs @@ -0,0 +1,28 @@ +use tokio_postgres::Error; + +use crate::{Client, Statement}; + +mod sealed { + pub trait Sealed {} +} + +pub trait Query: sealed::Sealed { + #[doc(hidden)] + fn __statement(&self, client: &mut Client) -> Result; +} + +impl sealed::Sealed for str {} + +impl Query for str { + fn __statement(&self, client: &mut Client) -> Result { + client.prepare(self) + } +} + +impl sealed::Sealed for Statement {} + +impl Query for Statement { + fn __statement(&self, _: &mut Client) -> Result { + Ok(self.clone()) + } +} diff --git a/postgres/src/statement.rs b/postgres/src/statement.rs index bcb746149..b6abe2a51 100644 --- a/postgres/src/statement.rs +++ b/postgres/src/statement.rs @@ -1,6 +1,7 @@ use tokio_postgres::types::Type; use tokio_postgres::Column; +#[derive(Clone)] pub struct Statement(pub(crate) tokio_postgres::Statement); impl Statement { diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 56d2dcd35..e772fb742 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,7 +1,7 @@ use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row}; -use crate::{Client, Statement}; +use crate::{Client, Query, Statement}; pub struct Transaction<'a> { client: &'a mut Client, @@ -46,16 +46,18 @@ impl<'a> Transaction<'a> { self.client.prepare_typed(query, types) } - pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Result { - self.client.execute(statement, params) + pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> 
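        // Transaction mirrors the Client API: each method forwards to the underlying
        // client, so the same choice between a bare query string and a prepared
        // Statement is available inside a transaction as well.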
Result + where + T: Query, + { + self.client.execute(query, params) } - pub fn query( - &mut self, - statement: &Statement, - params: &[&dyn ToSql], - ) -> Result, Error> { - self.client.query(statement, params) + pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + where + T: Query, + { + self.client.query(query, params) } pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 2a0010aeb..449cfa482 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -215,6 +215,7 @@ impl Future for Prepare { } } +#[derive(Clone)] pub struct Statement(proto::Statement); impl Statement { From 633e87aefce945949eb256850e52d344223ea80a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 21 Dec 2018 21:01:49 -0800 Subject: [PATCH 057/819] Add basic sync API tests --- postgres/src/client.rs | 4 +- postgres/src/lib.rs | 4 ++ postgres/src/test.rs | 95 +++++++++++++++++++++++++++++++++++++ postgres/src/transaction.rs | 4 +- 4 files changed, 103 insertions(+), 4 deletions(-) create mode 100644 postgres/src/test.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 2553c249d..2ffe9caa5 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -38,7 +38,7 @@ impl Client { pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where - T: Query, + T: ?Sized + Query, { let statement = query.__statement(self)?; self.0.execute(&statement.0, params).wait() @@ -46,7 +46,7 @@ impl Client { pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where - T: Query, + T: ?Sized + Query, { let statement = query.__statement(self)?; self.0.query(&statement.0, params).collect().wait() diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 60e249a31..12967cdc9 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -10,6 +10,10 @@ mod query; mod statement; mod transaction; +#[cfg(feature = "runtime")] +#[cfg(test)] +mod test; + #[cfg(feature = "runtime")] pub use crate::builder::*; pub use crate::client::*; diff --git a/postgres/src/test.rs b/postgres/src/test.rs new file mode 100644 index 000000000..db7cc1d35 --- /dev/null +++ b/postgres/src/test.rs @@ -0,0 +1,95 @@ +use tokio_postgres::types::Type; +use tokio_postgres::NoTls; + +use super::*; + +#[test] +fn prepare() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + let stmt = client.prepare("SELECT 1::INT, $1::TEXT").unwrap(); + assert_eq!(stmt.params(), &[Type::TEXT]); + assert_eq!(stmt.columns().len(), 2); + assert_eq!(stmt.columns()[0].type_(), &Type::INT4); + assert_eq!(stmt.columns()[1].type_(), &Type::TEXT); +} + +#[test] +fn query_prepared() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + let stmt = client.prepare("SELECT $1::TEXT").unwrap(); + let rows = client.query(&stmt, &[&"hello"]).unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, &str>(0), "hello"); +} + +#[test] +fn query_unprepared() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + let rows = client.query("SELECT $1::TEXT", &[&"hello"]).unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, &str>(0), "hello"); +} + +#[test] +fn transaction_commit() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + 
.unwrap(); + + let mut transaction = client.transaction().unwrap(); + + transaction + .execute("INSERT INTO foo DEFAULT VALUES", &[]) + .unwrap(); + + transaction.commit().unwrap(); + + let rows = client.query("SELECT * FROM foo", &[]).unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); +} + +#[test] +fn transaction_rollback() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + .unwrap(); + + let mut transaction = client.transaction().unwrap(); + + transaction + .execute("INSERT INTO foo DEFAULT VALUES", &[]) + .unwrap(); + + transaction.rollback().unwrap(); + + let rows = client.query("SELECT * FROM foo", &[]).unwrap(); + assert_eq!(rows.len(), 0); +} + +#[test] +fn transaction_drop() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + .unwrap(); + + let mut transaction = client.transaction().unwrap(); + + transaction + .execute("INSERT INTO foo DEFAULT VALUES", &[]) + .unwrap(); + + drop(transaction); + + let rows = client.query("SELECT * FROM foo", &[]).unwrap(); + assert_eq!(rows.len(), 0); +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index e772fb742..b61b5d585 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -48,14 +48,14 @@ impl<'a> Transaction<'a> { pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where - T: Query, + T: ?Sized + Query, { self.client.execute(query, params) } pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where - T: Query, + T: ?Sized + Query, { self.client.query(query, params) } From b9e8b4868bce4bd9fdbc7c5ad0bd42c6aecb2bdc Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 21 Dec 2018 21:08:26 -0800 Subject: [PATCH 058/819] Add Client::is_closed --- postgres/src/client.rs | 4 ++++ tokio-postgres/src/lib.rs | 4 ++++ tokio-postgres/src/proto/client.rs | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 2ffe9caa5..8dd7b05df 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -60,6 +60,10 @@ impl Client { self.batch_execute("BEGIN")?; Ok(Transaction::new(self)) } + + pub fn is_closed(&self) -> bool { + self.0.is_closed() + } } impl From for Client { diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 449cfa482..fbcb81b77 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -98,6 +98,10 @@ impl Client { pub fn batch_execute(&mut self, query: &str) -> BatchExecute { BatchExecute(self.0.batch_execute(query)) } + + pub fn is_closed(&self) -> bool { + self.0.is_closed() + } } #[must_use = "futures do nothing unless polled"] diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index fdc807657..20bc841aa 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -60,6 +60,10 @@ impl Client { })) } + pub fn is_closed(&self) -> bool { + self.0.sender.is_closed() + } + pub fn downgrade(&self) -> WeakClient { WeakClient(Arc::downgrade(&self.0)) } From 0d3e18b251137418271f917e320134a439cafe52 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 22 Dec 2018 13:38:35 -0800 Subject: [PATCH 059/819] Support nested transactions in sync API --- postgres/src/test.rs | 51 +++++++++++++++++++++++++++++++++++++ 
postgres/src/transaction.rs | 27 ++++++++++++++++++-- 2 files changed, 76 insertions(+), 2 deletions(-) diff --git a/postgres/src/test.rs b/postgres/src/test.rs index db7cc1d35..a55ce8e7a 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -93,3 +93,54 @@ fn transaction_drop() { let rows = client.query("SELECT * FROM foo", &[]).unwrap(); assert_eq!(rows.len(), 0); } + +#[test] +fn nested_transactions() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)") + .unwrap(); + + let mut transaction = client.transaction().unwrap(); + + transaction + .execute("INSERT INTO foo (id) VALUES (1)", &[]) + .unwrap(); + + let mut transaction2 = transaction.transaction().unwrap(); + + transaction2 + .execute("INSERT INTO foo (id) VALUES (2)", &[]) + .unwrap(); + + transaction2.rollback().unwrap(); + + let rows = transaction + .query("SELECT id FROM foo ORDER BY id", &[]) + .unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); + + let mut transaction3 = transaction.transaction().unwrap(); + + transaction3 + .execute("INSERT INTO foo (id) VALUES(3)", &[]) + .unwrap(); + + let mut transaction4 = transaction3.transaction().unwrap(); + + transaction4 + .execute("INSERT INTO foo (id) VALUES(4)", &[]) + .unwrap(); + + transaction4.commit().unwrap(); + transaction3.commit().unwrap(); + transaction.commit().unwrap(); + + let rows = client.query("SELECT id FROM foo ORDER BY id", &[]).unwrap(); + assert_eq!(rows.len(), 3); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[1].get::<_, i32>(0), 3); + assert_eq!(rows[2].get::<_, i32>(0), 4); +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index b61b5d585..0a2521778 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -5,6 +5,7 @@ use crate::{Client, Query, Statement}; pub struct Transaction<'a> { client: &'a mut Client, + depth: u32, done: bool, } @@ -20,13 +21,19 @@ impl<'a> Transaction<'a> { pub(crate) fn new(client: &'a mut Client) -> Transaction<'a> { Transaction { client, + depth: 0, done: false, } } pub fn commit(mut self) -> Result<(), Error> { self.done = true; - self.client.batch_execute("COMMIT") + if self.depth == 0 { + self.client.batch_execute("COMMIT") + } else { + self.client + .batch_execute(&format!("RELEASE sp{}", self.depth)) + } } pub fn rollback(mut self) -> Result<(), Error> { @@ -35,7 +42,12 @@ impl<'a> Transaction<'a> { } fn rollback_inner(&mut self) -> Result<(), Error> { - self.client.batch_execute("ROLLBACK") + if self.depth == 0 { + self.client.batch_execute("ROLLBACK") + } else { + self.client + .batch_execute(&format!("ROLLBACK TO sp{}", self.depth)) + } } pub fn prepare(&mut self, query: &str) -> Result { @@ -63,4 +75,15 @@ impl<'a> Transaction<'a> { pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { self.client.batch_execute(query) } + + pub fn transaction(&mut self) -> Result, Error> { + let depth = self.depth + 1; + self.client + .batch_execute(&format!("SAVEPOINT sp{}", depth))?; + Ok(Transaction { + client: self.client, + depth, + done: false, + }) + } } From 1fdfefbeda2658c3506aeb0a8172aac94bed6725 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 22 Dec 2018 17:02:48 -0800 Subject: [PATCH 060/819] Add Client::poll_idle Closes #403 --- tokio-postgres/src/lib.rs | 4 +++ tokio-postgres/src/proto/client.rs | 44 ++++++++++++++++------- tokio-postgres/src/proto/connection.rs | 24 +++++++++---- 
tokio-postgres/src/proto/idle.rs | 47 ++++++++++++++++++++++++ tokio-postgres/src/proto/mod.rs | 1 + tokio-postgres/tests/test/main.rs | 50 ++++++++++++++++++++++++++ 6 files changed, 150 insertions(+), 20 deletions(-) create mode 100644 tokio-postgres/src/proto/idle.rs diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index fbcb81b77..7bddecfe6 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -102,6 +102,10 @@ impl Client { pub fn is_closed(&self) -> bool { self.0.is_closed() } + + pub fn poll_idle(&mut self) -> Poll<(), Error> { + self.0.poll_idle() + } } #[must_use = "futures do nothing unless polled"] diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index 20bc841aa..e0c6341c9 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -1,7 +1,7 @@ use antidote::Mutex; use bytes::IntoBuf; use futures::sync::mpsc; -use futures::{AsyncSink, Sink, Stream}; +use futures::{AsyncSink, Poll, Sink, Stream}; use postgres_protocol; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -14,6 +14,7 @@ use crate::proto::connection::{Request, RequestMessages}; use crate::proto::copy_in::{CopyInFuture, CopyInReceiver, CopyMessage}; use crate::proto::copy_out::CopyOutStream; use crate::proto::execute::ExecuteFuture; +use crate::proto::idle::{IdleGuard, IdleState}; use crate::proto::portal::Portal; use crate::proto::prepare::PrepareFuture; use crate::proto::query::QueryStream; @@ -22,7 +23,7 @@ use crate::proto::statement::Statement; use crate::types::{IsNull, Oid, ToSql, Type}; use crate::Error; -pub struct PendingRequest(Result); +pub struct PendingRequest(Result<(RequestMessages, IdleGuard), Error>); pub struct WeakClient(Weak); @@ -41,6 +42,7 @@ struct State { struct Inner { state: Mutex, + idle: IdleState, sender: mpsc::UnboundedSender, } @@ -56,6 +58,7 @@ impl Client { typeinfo_enum_query: None, typeinfo_composite_query: None, }), + idle: IdleState::new(), sender, })) } @@ -64,6 +67,10 @@ impl Client { self.0.sender.is_closed() } + pub fn poll_idle(&self) -> Poll<(), Error> { + self.0.idle.poll_idle() + } + pub fn downgrade(&self) -> WeakClient { WeakClient(Arc::downgrade(&self.0)) } @@ -101,11 +108,15 @@ impl Client { } pub fn send(&self, request: PendingRequest) -> Result, Error> { - let messages = request.0?; + let (messages, idle) = request.0?; let (sender, receiver) = mpsc::channel(0); self.0 .sender - .unbounded_send(Request { messages, sender }) + .unbounded_send(Request { + messages, + sender, + idle: Some(idle), + }) .map(|_| receiver) .map_err(|_| Error::closed()) } @@ -134,7 +145,7 @@ impl Client { pub fn execute(&self, statement: &Statement, params: &[&dyn ToSql]) -> ExecuteFuture { let pending = PendingRequest( self.excecute_message(statement, params) - .map(RequestMessages::Single), + .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), ); ExecuteFuture::new(self.clone(), pending, statement.clone()) } @@ -142,7 +153,7 @@ impl Client { pub fn query(&self, statement: &Statement, params: &[&dyn ToSql]) -> QueryStream { let pending = PendingRequest( self.excecute_message(statement, params) - .map(RequestMessages::Single), + .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), ); QueryStream::new(self.clone(), pending, statement.clone()) } @@ -152,7 +163,8 @@ impl Client { if let Ok(ref mut buf) = buf { frontend::sync(buf); } - let pending = PendingRequest(buf.map(RequestMessages::Single)); + let pending = + 
PendingRequest(buf.map(|m| (RequestMessages::Single(m), self.0.idle.guard()))); BindFuture::new(self.clone(), pending, name, statement.clone()) } @@ -183,10 +195,13 @@ impl Client { Ok(AsyncSink::Ready) => {} _ => unreachable!("channel should have capacity"), } - RequestMessages::CopyIn { - receiver: CopyInReceiver::new(receiver), - pending_message: None, - } + ( + RequestMessages::CopyIn { + receiver: CopyInReceiver::new(receiver), + pending_message: None, + }, + self.0.idle.guard(), + ) })); CopyInFuture::new(self.clone(), pending, statement.clone(), stream, sender) } @@ -194,7 +209,7 @@ impl Client { pub fn copy_out(&self, statement: &Statement, params: &[&dyn ToSql]) -> CopyOutStream { let pending = PendingRequest( self.excecute_message(statement, params) - .map(RequestMessages::Single), + .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), ); CopyOutStream::new(self.clone(), pending, statement.clone()) } @@ -215,6 +230,7 @@ impl Client { let _ = self.0.sender.unbounded_send(Request { messages: RequestMessages::Single(buf), sender, + idle: None, }); } @@ -261,6 +277,8 @@ impl Client { F: FnOnce(&mut Vec) -> Result<(), Error>, { let mut buf = vec![]; - PendingRequest(messages(&mut buf).map(|()| RequestMessages::Single(buf))) + PendingRequest( + messages(&mut buf).map(|()| (RequestMessages::Single(buf), self.0.idle.guard())), + ) } } diff --git a/tokio-postgres/src/proto/connection.rs b/tokio-postgres/src/proto/connection.rs index 4684b4f3f..e4c80fa17 100644 --- a/tokio-postgres/src/proto/connection.rs +++ b/tokio-postgres/src/proto/connection.rs @@ -10,6 +10,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::codec::PostgresCodec; use crate::proto::copy_in::CopyInReceiver; +use crate::proto::idle::IdleGuard; use crate::{AsyncMessage, CancelData, Notification}; use crate::{DbError, Error}; @@ -24,6 +25,12 @@ pub enum RequestMessages { pub struct Request { pub messages: RequestMessages, pub sender: mpsc::Sender, + pub idle: Option, +} + +struct Response { + sender: mpsc::Sender, + _idle: Option, } #[derive(PartialEq, Debug)] @@ -40,7 +47,7 @@ pub struct Connection { receiver: mpsc::UnboundedReceiver, pending_request: Option, pending_response: Option, - responses: VecDeque>, + responses: VecDeque, state: State, } @@ -124,8 +131,8 @@ where m => m, }; - let mut sender = match self.responses.pop_front() { - Some(sender) => sender, + let mut response = match self.responses.pop_front() { + Some(response) => response, None => match message { Message::ErrorResponse(error) => return Err(Error::db(error)), _ => return Err(Error::unexpected_message()), @@ -137,16 +144,16 @@ where _ => false, }; - match sender.start_send(message) { + match response.sender.start_send(message) { // if the receiver's hung up we still need to page through the rest of the messages // designated to it Ok(AsyncSink::Ready) | Err(_) => { if !request_complete { - self.responses.push_front(sender); + self.responses.push_front(response); } } Ok(AsyncSink::NotReady(message)) => { - self.responses.push_front(sender); + self.responses.push_front(response); self.pending_response = Some(message); trace!("poll_read: waiting on sender"); return Ok(None); @@ -164,7 +171,10 @@ where match try_ready_receive!(self.receiver.poll()) { Some(request) => { trace!("polled new request"); - self.responses.push_back(request.sender); + self.responses.push_back(Response { + sender: request.sender, + _idle: request.idle, + }); Ok(Async::Ready(Some(request.messages))) } None => Ok(Async::Ready(None)), diff --git 
a/tokio-postgres/src/proto/idle.rs b/tokio-postgres/src/proto/idle.rs new file mode 100644 index 000000000..d4cbe8f06 --- /dev/null +++ b/tokio-postgres/src/proto/idle.rs @@ -0,0 +1,47 @@ +use futures::task::AtomicTask; +use futures::{Async, Poll}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; + +use crate::Error; + +struct Inner { + active: AtomicUsize, + task: AtomicTask, +} + +pub struct IdleState(Arc); + +impl IdleState { + pub fn new() -> IdleState { + IdleState(Arc::new(Inner { + active: AtomicUsize::new(0), + task: AtomicTask::new(), + })) + } + + pub fn guard(&self) -> IdleGuard { + self.0.active.fetch_add(1, Ordering::SeqCst); + IdleGuard(self.0.clone()) + } + + pub fn poll_idle(&self) -> Poll<(), Error> { + self.0.task.register(); + + if self.0.active.load(Ordering::SeqCst) == 0 { + Ok(Async::Ready(())) + } else { + Ok(Async::NotReady) + } + } +} + +pub struct IdleGuard(Arc); + +impl Drop for IdleGuard { + fn drop(&mut self) { + if self.0.active.fetch_sub(1, Ordering::SeqCst) == 1 { + self.0.task.notify(); + } + } +} diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index de81620e4..079deeee6 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -31,6 +31,7 @@ mod copy_in; mod copy_out; mod execute; mod handshake; +mod idle; mod portal; mod prepare; mod query; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 41918e65c..b6be86628 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -4,6 +4,7 @@ use futures::sync::mpsc; use futures::{future, stream, try_ready}; use log::debug; use std::error::Error; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Duration, Instant}; use tokio::net::TcpStream; use tokio::prelude::*; @@ -683,3 +684,52 @@ fn transaction_builder_around_moved_client() { drop(client); runtime.run().unwrap(); } + +#[test] +fn poll_idle() { + struct IdleFuture { + client: tokio_postgres::Client, + query: Option, + } + + impl Future for IdleFuture { + type Item = (); + type Error = tokio_postgres::Error; + + fn poll(&mut self) -> Poll<(), tokio_postgres::Error> { + if let Some(_) = self.query.take() { + assert!(!self.client.poll_idle().unwrap().is_ready()); + return Ok(Async::NotReady); + } + + try_ready!(self.client.poll_idle()); + assert!(QUERY_DONE.load(Ordering::SeqCst)); + + Ok(Async::Ready(())) + } + } + + static QUERY_DONE: AtomicBool = AtomicBool::new(false); + + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.handle().spawn(connection).unwrap(); + + let stmt = runtime.block_on(client.prepare("SELECT 1")).unwrap(); + + let query = client + .query(&stmt, &[]) + .collect() + .map(|_| QUERY_DONE.store(true, Ordering::SeqCst)) + .map_err(|e| panic!("{}", e)); + runtime.spawn(query); + + let future = IdleFuture { + query: Some(client.prepare("")), + client, + }; + runtime.block_on(future).unwrap(); +} From 7eaac1cb1aca2156ee11ff6a0892df10e24a580c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 22 Dec 2018 21:42:03 -0800 Subject: [PATCH 061/819] Sync copy_in support --- postgres/src/client.rs | 104 ++++++++++++++++++++++++++++- postgres/src/test.rs | 27 ++++++++ tokio-postgres/src/proto/client.rs | 2 +- 3 files changed, 131 insertions(+), 2 deletions(-) diff --git a/postgres/src/client.rs 
b/postgres/src/client.rs index 8dd7b05df..6cc189286 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,4 +1,6 @@ -use futures::{Future, Stream}; +use futures::sync::mpsc; +use futures::{try_ready, Async, AsyncSink, Future, Poll, Sink, Stream}; +use std::io::{self, Read}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row}; #[cfg(feature = "runtime")] @@ -52,6 +54,29 @@ impl Client { self.0.query(&statement.0, params).collect().wait() } + pub fn copy_in( + &mut self, + query: &T, + params: &[&dyn ToSql], + reader: R, + ) -> Result + where + T: ?Sized + Query, + R: Read, + { + let statement = query.__statement(self)?; + let (sender, receiver) = mpsc::channel(1); + let future = self.0.copy_in(&statement.0, params, CopyInStream(receiver)); + + CopyInFuture { + future, + sender, + reader, + pending: None, + } + .wait() + } + pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { self.0.batch_execute(query).wait() } @@ -71,3 +96,80 @@ impl From for Client { Client(c) } } + +enum CopyData { + Data(Vec), + Error(io::Error), + Done, +} + +struct CopyInStream(mpsc::Receiver); + +impl Stream for CopyInStream { + type Item = Vec; + type Error = io::Error; + + fn poll(&mut self) -> Poll>, io::Error> { + match self.0.poll().expect("mpsc::Receiver can't error") { + Async::Ready(Some(CopyData::Data(buf))) => Ok(Async::Ready(Some(buf))), + Async::Ready(Some(CopyData::Error(e))) => Err(e), + Async::Ready(Some(CopyData::Done)) => Ok(Async::Ready(None)), + Async::Ready(None) => Err(io::Error::new(io::ErrorKind::Other, "writer disconnected")), + Async::NotReady => Ok(Async::NotReady), + } + } +} + +struct CopyInFuture { + future: tokio_postgres::CopyIn, + sender: mpsc::Sender, + reader: R, + pending: Option, +} + +impl CopyInFuture { + fn poll_send_data(&mut self, data: CopyData) -> Poll<(), Error> { + match self.sender.start_send(data) { + Ok(AsyncSink::Ready) => Ok(Async::Ready(())), + Ok(AsyncSink::NotReady(pending)) => { + self.pending = Some(pending); + return Ok(Async::NotReady); + } + // the future's hung up on its end of the channel, so we'll wait for it to report an error + Err(_) => { + self.pending = Some(CopyData::Done); + return Ok(Async::NotReady); + } + } + } +} + +impl Future for CopyInFuture +where + R: Read, +{ + type Item = u64; + type Error = Error; + + fn poll(&mut self) -> Poll { + if let Async::Ready(n) = self.future.poll()? 
{ + return Ok(Async::Ready(n)); + } + + loop { + let data = match self.pending.take() { + Some(pending) => pending, + None => { + let mut buf = vec![]; + match self.reader.by_ref().take(4096).read_to_end(&mut buf) { + Ok(0) => CopyData::Done, + Ok(_) => CopyData::Data(buf), + Err(e) => CopyData::Error(e), + } + } + }; + + try_ready!(self.poll_send_data(data)); + } + } +} diff --git a/postgres/src/test.rs b/postgres/src/test.rs index a55ce8e7a..c86f23274 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -144,3 +144,30 @@ fn nested_transactions() { assert_eq!(rows[1].get::<_, i32>(0), 3); assert_eq!(rows[2].get::<_, i32>(0), 4); } + +#[test] +fn copy_in() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") + .unwrap(); + + client + .copy_in( + "COPY foo FROM stdin", + &[], + &mut &b"1\tsteven\n2\ttimothy"[..], + ) + .unwrap(); + + let rows = client + .query("SELECT id, name FROM foo ORDER BY id", &[]) + .unwrap(); + + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[0].get::<_, &str>(1), "steven"); + assert_eq!(rows[1].get::<_, i32>(0), 2); + assert_eq!(rows[1].get::<_, &str>(1), "timothy"); +} diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index e0c6341c9..cf22add69 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -189,7 +189,7 @@ impl Client { ::Buf: Send, S::Error: Into>, { - let (mut sender, receiver) = mpsc::channel(0); + let (mut sender, receiver) = mpsc::channel(1); let pending = PendingRequest(self.excecute_message(statement, params).map(|buf| { match sender.start_send(CopyMessage::Data(buf)) { Ok(AsyncSink::Ready) => {} From ee8d3a76325c8c265a92a3963b595b5ad1dbbb14 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 22 Dec 2018 21:43:52 -0800 Subject: [PATCH 062/819] Fix clippy --- postgres/src/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 6cc189286..fbaa933b9 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -133,12 +133,12 @@ impl CopyInFuture { Ok(AsyncSink::Ready) => Ok(Async::Ready(())), Ok(AsyncSink::NotReady(pending)) => { self.pending = Some(pending); - return Ok(Async::NotReady); + Ok(Async::NotReady) } // the future's hung up on its end of the channel, so we'll wait for it to report an error Err(_) => { self.pending = Some(CopyData::Done); - return Ok(Async::NotReady); + Ok(Async::NotReady) } } } From 1b29330a9609a0552166ce7d9077c57eeb0c8cae Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 22 Dec 2018 22:09:27 -0800 Subject: [PATCH 063/819] Sync transaction copy_in --- postgres/src/transaction.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 0a2521778..a541ee918 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,3 +1,4 @@ +use std::io::Read; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row}; @@ -72,6 +73,19 @@ impl<'a> Transaction<'a> { self.client.query(query, params) } + pub fn copy_in( + &mut self, + query: &T, + params: &[&dyn ToSql], + reader: R, + ) -> Result + where + T: ?Sized + Query, + R: Read, + { + self.client.copy_in(query, params, reader) + } + pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { self.client.batch_execute(query) } From 
2b1cac40c353e75064f512e6d81be201c05f8595 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Dec 2018 12:22:25 -0800 Subject: [PATCH 064/819] Clean up CopyInFuture --- postgres/src/client.rs | 56 +++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index fbaa933b9..2f2496e21 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,5 +1,5 @@ use futures::sync::mpsc; -use futures::{try_ready, Async, AsyncSink, Future, Poll, Sink, Stream}; +use futures::{Async, AsyncSink, Future, Poll, Sink, Stream}; use std::io::{self, Read}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row}; @@ -73,6 +73,7 @@ impl Client { sender, reader, pending: None, + done: false, } .wait() } @@ -125,23 +126,7 @@ struct CopyInFuture { sender: mpsc::Sender, reader: R, pending: Option, -} - -impl CopyInFuture { - fn poll_send_data(&mut self, data: CopyData) -> Poll<(), Error> { - match self.sender.start_send(data) { - Ok(AsyncSink::Ready) => Ok(Async::Ready(())), - Ok(AsyncSink::NotReady(pending)) => { - self.pending = Some(pending); - Ok(Async::NotReady) - } - // the future's hung up on its end of the channel, so we'll wait for it to report an error - Err(_) => { - self.pending = Some(CopyData::Done); - Ok(Async::NotReady) - } - } - } + done: bool, } impl Future for CopyInFuture @@ -152,24 +137,45 @@ where type Error = Error; fn poll(&mut self) -> Poll { - if let Async::Ready(n) = self.future.poll()? { - return Ok(Async::Ready(n)); - } - loop { + if let Async::Ready(n) = self.future.poll()? { + return Ok(Async::Ready(n)); + } + let data = match self.pending.take() { Some(pending) => pending, None => { + if self.done { + continue; + } + let mut buf = vec![]; match self.reader.by_ref().take(4096).read_to_end(&mut buf) { - Ok(0) => CopyData::Done, + Ok(0) => { + self.done = true; + CopyData::Done + } Ok(_) => CopyData::Data(buf), - Err(e) => CopyData::Error(e), + Err(e) => { + self.done = true; + CopyData::Error(e) + } } } }; - try_ready!(self.poll_send_data(data)); + match self.sender.start_send(data) { + Ok(AsyncSink::Ready) => {} + Ok(AsyncSink::NotReady(pending)) => { + self.pending = Some(pending); + return Ok(Async::NotReady); + } + // the future's hung up on its end of the channel, so we'll wait for it to error + Err(_) => { + self.done = true; + return Ok(Async::NotReady); + } + } } } } From 793c5f1b872ccabd03b9a744889ef033646e628d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Dec 2018 13:08:02 -0800 Subject: [PATCH 065/819] Add sync copy_out --- postgres/Cargo.toml | 1 + postgres/src/client.rs | 63 ++++++++++++++++++++++++++++++++++++- postgres/src/test.rs | 26 +++++++++++++++ postgres/src/transaction.rs | 13 +++++++- 4 files changed, 101 insertions(+), 2 deletions(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 03176d028..a59c0794d 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -10,6 +10,7 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] [dependencies] +bytes = "0.4" futures = "0.1" tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 2f2496e21..f038e1c2d 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,6 +1,9 @@ +use bytes::{Buf, Bytes}; +use futures::stream; use futures::sync::mpsc; use futures::{Async, AsyncSink, Future, Poll, Sink, Stream}; -use 
std::io::{self, Read}; +use std::io::{self, BufRead, Cursor, Read}; +use std::marker::PhantomData; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row}; #[cfg(feature = "runtime")] @@ -78,6 +81,30 @@ impl Client { .wait() } + pub fn copy_out( + &mut self, + query: &T, + params: &[&dyn ToSql], + ) -> Result, Error> + where + T: ?Sized + Query, + { + let statement = query.__statement(self)?; + let mut stream = self.0.copy_out(&statement.0, params).wait(); + + let cur = match stream.next() { + Some(Ok(cur)) => cur, + Some(Err(e)) => return Err(e), + None => Bytes::new(), + }; + + Ok(CopyOutReader { + stream, + cur: Cursor::new(cur), + _p: PhantomData, + }) + } + pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { self.0.batch_execute(query).wait() } @@ -179,3 +206,37 @@ where } } } + +pub struct CopyOutReader<'a> { + stream: stream::Wait, + cur: Cursor, + _p: PhantomData<&'a mut ()>, +} + +impl<'a> Read for CopyOutReader<'a> { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let b = self.fill_buf()?; + let len = usize::min(buf.len(), b.len()); + buf[..len].copy_from_slice(&b[..len]); + self.consume(len); + Ok(len) + } +} + +impl<'a> BufRead for CopyOutReader<'a> { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + if self.cur.remaining() == 0 { + match self.stream.next() { + Some(Ok(cur)) => self.cur = Cursor::new(cur), + Some(Err(e)) => return Err(io::Error::new(io::ErrorKind::Other, e)), + None => {} + }; + } + + Ok(Buf::bytes(&self.cur)) + } + + fn consume(&mut self, amt: usize) { + self.cur.advance(amt); + } +} diff --git a/postgres/src/test.rs b/postgres/src/test.rs index c86f23274..70f8537b2 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,3 +1,4 @@ +use std::io::Read; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -171,3 +172,28 @@ fn copy_in() { assert_eq!(rows[1].get::<_, i32>(0), 2); assert_eq!(rows[1].get::<_, &str>(1), "timothy"); } + +#[test] +fn copy_out() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo (id INT, name TEXT); + + INSERT INTO foo (id, name) VALUES (1, 'steven'), (2, 'timothy'); + ", + ) + .unwrap(); + + let mut reader = client + .copy_out("COPY foo (id, name) TO STDOUT", &[]) + .unwrap(); + let mut s = String::new(); + reader.read_to_string(&mut s).unwrap(); + + assert_eq!(s, "1\tsteven\n2\ttimothy\n"); + + client.batch_execute("SELECT 1").unwrap(); +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index a541ee918..0fa19b9b2 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -2,7 +2,7 @@ use std::io::Read; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row}; -use crate::{Client, Query, Statement}; +use crate::{Client, CopyOutReader, Query, Statement}; pub struct Transaction<'a> { client: &'a mut Client, @@ -86,6 +86,17 @@ impl<'a> Transaction<'a> { self.client.copy_in(query, params, reader) } + pub fn copy_out( + &mut self, + query: &T, + params: &[&dyn ToSql], + ) -> Result, Error> + where + T: ?Sized + Query, + { + self.client.copy_out(query, params) + } + pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { self.client.batch_execute(query) } From ec680b1e0e344e72a4ee6996b63bd86e6ffb25dc Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Dec 2018 13:28:13 -0800 Subject: [PATCH 066/819] Avoid NLL letting CopyOutReader borrow drop early --- postgres/src/client.rs | 5 +++++ 
postgres/src/test.rs | 1 + 2 files changed, 6 insertions(+) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index f038e1c2d..6ce116500 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -213,6 +213,11 @@ pub struct CopyOutReader<'a> { _p: PhantomData<&'a mut ()>, } +// no-op impl to extend borrow until drop +impl<'a> Drop for CopyOutReader<'a> { + fn drop(&mut self) {} +} + impl<'a> Read for CopyOutReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { let b = self.fill_buf()?; diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 70f8537b2..e7bb778a5 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -192,6 +192,7 @@ fn copy_out() { .unwrap(); let mut s = String::new(); reader.read_to_string(&mut s).unwrap(); + drop(reader); assert_eq!(s, "1\tsteven\n2\ttimothy\n"); From a1fc412f9e5036149981ea3400159c47602d280a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Dec 2018 15:02:42 -0800 Subject: [PATCH 067/819] Simplify sync copy_in --- postgres/src/client.rs | 99 ++++++------------------------------------ 1 file changed, 13 insertions(+), 86 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 6ce116500..99d648420 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,7 +1,6 @@ use bytes::{Buf, Bytes}; use futures::stream; -use futures::sync::mpsc; -use futures::{Async, AsyncSink, Future, Poll, Sink, Stream}; +use futures::{Async, Future, Poll, Stream}; use std::io::{self, BufRead, Cursor, Read}; use std::marker::PhantomData; use tokio_postgres::types::{ToSql, Type}; @@ -68,17 +67,9 @@ impl Client { R: Read, { let statement = query.__statement(self)?; - let (sender, receiver) = mpsc::channel(1); - let future = self.0.copy_in(&statement.0, params, CopyInStream(receiver)); - - CopyInFuture { - future, - sender, - reader, - pending: None, - done: false, - } - .wait() + self.0 + .copy_in(&statement.0, params, CopyInStream(reader)) + .wait() } pub fn copy_out( @@ -125,84 +116,20 @@ impl From for Client { } } -enum CopyData { - Data(Vec), - Error(io::Error), - Done, -} - -struct CopyInStream(mpsc::Receiver); +struct CopyInStream(R); -impl Stream for CopyInStream { - type Item = Vec; - type Error = io::Error; - - fn poll(&mut self) -> Poll>, io::Error> { - match self.0.poll().expect("mpsc::Receiver can't error") { - Async::Ready(Some(CopyData::Data(buf))) => Ok(Async::Ready(Some(buf))), - Async::Ready(Some(CopyData::Error(e))) => Err(e), - Async::Ready(Some(CopyData::Done)) => Ok(Async::Ready(None)), - Async::Ready(None) => Err(io::Error::new(io::ErrorKind::Other, "writer disconnected")), - Async::NotReady => Ok(Async::NotReady), - } - } -} - -struct CopyInFuture { - future: tokio_postgres::CopyIn, - sender: mpsc::Sender, - reader: R, - pending: Option, - done: bool, -} - -impl Future for CopyInFuture +impl Stream for CopyInStream where R: Read, { - type Item = u64; - type Error = Error; - - fn poll(&mut self) -> Poll { - loop { - if let Async::Ready(n) = self.future.poll()? 
{ - return Ok(Async::Ready(n)); - } - - let data = match self.pending.take() { - Some(pending) => pending, - None => { - if self.done { - continue; - } - - let mut buf = vec![]; - match self.reader.by_ref().take(4096).read_to_end(&mut buf) { - Ok(0) => { - self.done = true; - CopyData::Done - } - Ok(_) => CopyData::Data(buf), - Err(e) => { - self.done = true; - CopyData::Error(e) - } - } - } - }; + type Item = Vec; + type Error = io::Error; - match self.sender.start_send(data) { - Ok(AsyncSink::Ready) => {} - Ok(AsyncSink::NotReady(pending)) => { - self.pending = Some(pending); - return Ok(Async::NotReady); - } - // the future's hung up on its end of the channel, so we'll wait for it to error - Err(_) => { - self.done = true; - return Ok(Async::NotReady); - } - } + fn poll(&mut self) -> Poll>, io::Error> { + let mut buf = vec![]; + match self.0.by_ref().take(4096).read_to_end(&mut buf)? { + 0 => Ok(Async::Ready(None)), + _ => Ok(Async::Ready(Some(buf))), } } } From 760e4a4f3b0169a1bff56fb647466cce50ef069b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Dec 2018 15:50:41 -0800 Subject: [PATCH 068/819] Guarantee some buffer for messages --- tokio-postgres/src/proto/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index cf22add69..54d631c20 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -109,7 +109,7 @@ impl Client { pub fn send(&self, request: PendingRequest) -> Result, Error> { let (messages, idle) = request.0?; - let (sender, receiver) = mpsc::channel(0); + let (sender, receiver) = mpsc::channel(1); self.0 .sender .unbounded_send(Request { From 45b078982ac440e7cab9d6e342ef8abb0de4142a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Dec 2018 15:58:39 -0800 Subject: [PATCH 069/819] Add accessors for the async client --- postgres/src/client.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 99d648420..6f0bea565 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -108,6 +108,18 @@ impl Client { pub fn is_closed(&self) -> bool { self.0.is_closed() } + + pub fn get_ref(&self) -> &tokio_postgres::Client { + &self.0 + } + + pub fn get_mut(&mut self) -> &mut tokio_postgres::Client { + &mut self.0 + } + + pub fn into_inner(self) -> tokio_postgres::Client { + self.0 + } } impl From for Client { From be022b537cf9000dab3b7210b4393db42729a58b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 24 Dec 2018 13:02:48 -0500 Subject: [PATCH 070/819] Fix poll_idle test race --- tokio-postgres/tests/test/main.rs | 83 ++++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 19 deletions(-) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index b6be86628..1f72a7eae 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -686,25 +686,29 @@ fn transaction_builder_around_moved_client() { } #[test] -fn poll_idle() { - struct IdleFuture { - client: tokio_postgres::Client, - query: Option, +fn poll_idle_running() { + struct DelayStream(Delay); + + impl Stream for DelayStream { + type Item = Vec; + type Error = tokio_postgres::Error; + + fn poll(&mut self) -> Poll>, tokio_postgres::Error> { + try_ready!(self.0.poll().map_err(|e| panic!("{}", e))); + QUERY_DONE.store(true, Ordering::SeqCst); + Ok(Async::Ready(None)) + } } + struct IdleFuture(tokio_postgres::Client); + impl Future for 
IdleFuture { type Item = (); type Error = tokio_postgres::Error; fn poll(&mut self) -> Poll<(), tokio_postgres::Error> { - if let Some(_) = self.query.take() { - assert!(!self.client.poll_idle().unwrap().is_ready()); - return Ok(Async::NotReady); - } - - try_ready!(self.client.poll_idle()); + try_ready!(self.0.poll_idle()); assert!(QUERY_DONE.load(Ordering::SeqCst)); - Ok(Async::Ready(())) } } @@ -718,18 +722,59 @@ fn poll_idle() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); - let stmt = runtime.block_on(client.prepare("SELECT 1")).unwrap(); + let execute = client.batch_execute("CREATE TEMPORARY TABLE foo (id INT)"); + runtime.block_on(execute).unwrap(); + + let prepare = client.prepare("COPY foo FROM STDIN"); + let stmt = runtime.block_on(prepare).unwrap(); + let copy_in = client.copy_in( + &stmt, + &[], + DelayStream(Delay::new(Instant::now() + Duration::from_millis(10))), + ); + let copy_in = copy_in.map(|_| ()).map_err(|e| panic!("{}", e)); + runtime.spawn(copy_in); - let query = client - .query(&stmt, &[]) - .collect() - .map(|_| QUERY_DONE.store(true, Ordering::SeqCst)) - .map_err(|e| panic!("{}", e)); - runtime.spawn(query); + let future = IdleFuture(client); + runtime.block_on(future).unwrap(); +} + +#[test] +fn poll_idle_new() { + struct IdleFuture { + client: tokio_postgres::Client, + prepare: Option, + } + + impl Future for IdleFuture { + type Item = (); + type Error = tokio_postgres::Error; + + fn poll(&mut self) -> Poll<(), tokio_postgres::Error> { + match self.prepare.take() { + Some(_future) => { + assert!(!self.client.poll_idle().unwrap().is_ready()); + Ok(Async::NotReady) + } + None => { + assert!(self.client.poll_idle().unwrap().is_ready()); + Ok(Async::Ready(())) + } + } + } + } + + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.handle().spawn(connection).unwrap(); + let prepare = client.prepare(""); let future = IdleFuture { - query: Some(client.prepare("")), client, + prepare: Some(prepare), }; runtime.block_on(future).unwrap(); } From d5104bc4734a748910faeb9d069ac88642a5a0c7 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 25 Dec 2018 15:35:33 -0500 Subject: [PATCH 071/819] Turn on TCP nodelay in socket --- tokio-postgres/src/proto/connect_once.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 929428699..e50351853 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -117,7 +117,7 @@ where return Err(Error::connect(io::Error::new( io::ErrorKind::InvalidData, "resolved 0 addresses", - ))) + ))); } }; @@ -134,7 +134,7 @@ where ) -> Poll, Error> { let stream = loop { match state.future.poll() { - Ok(Async::Ready(stream)) => break Socket::new_tcp(stream), + Ok(Async::Ready(stream)) => break stream, Ok(Async::NotReady) => return Ok(Async::NotReady), Err(e) => { let addr = match state.addrs.next() { @@ -147,6 +147,9 @@ where }; let state = state.take(); + stream.set_nodelay(true).map_err(Error::connect)?; + let stream = Socket::new_tcp(stream); + transition!(Handshaking { future: HandshakeFuture::new(stream, state.tls_mode, state.params), }) From defe764520539039e3e40491d2491928c105ecd1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 25 Dec 2018 
16:11:33 -0500 Subject: [PATCH 072/819] Support connect_timeout --- tokio-postgres/Cargo.toml | 3 +- tokio-postgres/src/error/mod.rs | 27 ++++++++++++ tokio-postgres/src/proto/connect_once.rs | 53 +++++++++++++++++++++++- 3 files changed, 81 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 349d4cdc2..f058446a8 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -28,7 +28,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio-tcp", "tokio-uds", "futures-cpupool", "lazy_static"] +runtime = ["tokio-tcp", "tokio-timer", "tokio-uds", "futures-cpupool", "lazy_static"] "with-bit-vec-0.5" = ["bit-vec-05"] "with-chrono-0.4" = ["chrono-04"] @@ -53,6 +53,7 @@ void = "1.0" tokio-tcp = { version = "0.1", optional = true } futures-cpupool = { version = "0.1", optional = true } lazy_static = { version = "1.0", optional = true } +tokio-timer = { version = "0.2", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index bc3913a46..1b7fca2cd 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -356,6 +356,12 @@ enum Kind { InvalidPort, #[cfg(feature = "runtime")] InvalidPortCount, + #[cfg(feature = "runtime")] + InvalidConnectTimeout, + #[cfg(feature = "runtime")] + Timer, + #[cfg(feature = "runtime")] + ConnectTimeout, } struct ErrorInner { @@ -401,6 +407,12 @@ impl fmt::Display for Error { Kind::InvalidPort => "invalid port", #[cfg(feature = "runtime")] Kind::InvalidPortCount => "wrong number of ports provided", + #[cfg(feature = "runtime")] + Kind::InvalidConnectTimeout => "invalid connect_timeout", + #[cfg(feature = "runtime")] + Kind::Timer => "timer error", + #[cfg(feature = "runtime")] + Kind::ConnectTimeout => "timed out connecting to server", }; fmt.write_str(s)?; if let Some(ref cause) = self.0.cause { @@ -523,4 +535,19 @@ impl Error { pub(crate) fn invalid_port_count() -> Error { Error::new(Kind::InvalidPortCount, None) } + + #[cfg(feature = "runtime")] + pub(crate) fn invalid_connect_timeout(e: ParseIntError) -> Error { + Error::new(Kind::InvalidConnectTimeout, Some(Box::new(e))) + } + + #[cfg(feature = "runtime")] + pub(crate) fn timer(e: tokio_timer::Error) -> Error { + Error::new(Kind::Timer, Some(Box::new(e))) + } + + #[cfg(feature = "runtime")] + pub(crate) fn connect_timeout() -> Error { + Error::new(Kind::ConnectTimeout, None) + } } diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index e50351853..4dba5c3ca 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -7,8 +7,10 @@ use std::io; use std::net::{SocketAddr, ToSocketAddrs}; #[cfg(unix)] use std::path::Path; +use std::time::{Duration, Instant}; use std::vec; use tokio_tcp::TcpStream; +use tokio_timer::Delay; #[cfg(unix)] use tokio_uds::UnixStream; @@ -40,12 +42,16 @@ where #[state_machine_future(transitions(Handshaking))] ConnectingUnix { future: tokio_uds::ConnectFuture, + connect_timeout: Option, + timeout: Option, tls_mode: T, params: HashMap, }, #[state_machine_future(transitions(ConnectingTcp))] ResolvingDns { future: CpuFuture, io::Error>, + connect_timeout: Option, + timeout: Option, tls_mode: T, params: HashMap, }, @@ -53,6 +59,8 @@ where ConnectingTcp { future: tokio_tcp::ConnectFuture, 
addrs: vec::IntoIter, + connect_timeout: Option, + timeout: Option, tls_mode: T, params: HashMap, }, @@ -69,7 +77,20 @@ where T: TlsMode, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let state = state.take(); + let mut state = state.take(); + + let connect_timeout = match state.params.remove("connect_timeout") { + Some(s) => { + let seconds = s.parse::().map_err(Error::invalid_connect_timeout)?; + if seconds <= 0 { + None + } else { + Some(Duration::from_secs(seconds as u64)) + } + } + None => None, + }; + let timeout = connect_timeout.map(|d| Delay::new(Instant::now() + d)); #[cfg(unix)] { @@ -77,6 +98,8 @@ where let path = Path::new(&state.host).join(format!(".s.PGSQL.{}", state.port)); transition!(ConnectingUnix { future: UnixStream::connect(path), + connect_timeout, + timeout, tls_mode: state.tls_mode, params: state.params, }) @@ -87,6 +110,8 @@ where let port = state.port; transition!(ResolvingDns { future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()), + connect_timeout, + timeout, tls_mode: state.tls_mode, params: state.params, }) @@ -96,6 +121,14 @@ where fn poll_connecting_unix<'a>( state: &'a mut RentToOwn<'a, ConnectingUnix>, ) -> Poll, Error> { + if let Some(timeout) = &mut state.timeout { + match timeout.poll() { + Ok(Async::Ready(())) => return Err(Error::connect_timeout()), + Ok(Async::NotReady) => {} + Err(e) => return Err(Error::timer(e)), + } + } + let stream = try_ready!(state.future.poll().map_err(Error::connect)); let stream = Socket::new_unix(stream); let state = state.take(); @@ -108,6 +141,14 @@ where fn poll_resolving_dns<'a>( state: &'a mut RentToOwn<'a, ResolvingDns>, ) -> Poll, Error> { + if let Some(timeout) = &mut state.timeout { + match timeout.poll() { + Ok(Async::Ready(())) => return Err(Error::connect_timeout()), + Ok(Async::NotReady) => {} + Err(e) => return Err(Error::timer(e)), + } + } + let mut addrs = try_ready!(state.future.poll().map_err(Error::connect)); let state = state.take(); @@ -124,6 +165,8 @@ where transition!(ConnectingTcp { future: TcpStream::connect(&addr), addrs, + connect_timeout: state.connect_timeout, + timeout: state.timeout, tls_mode: state.tls_mode, params: state.params, }) @@ -132,6 +175,14 @@ where fn poll_connecting_tcp<'a>( state: &'a mut RentToOwn<'a, ConnectingTcp>, ) -> Poll, Error> { + if let Some(timeout) = &mut state.timeout { + match timeout.poll() { + Ok(Async::Ready(())) => return Err(Error::connect_timeout()), + Ok(Async::NotReady) => {} + Err(e) => return Err(Error::timer(e)), + } + } + let stream = loop { match state.future.poll() { Ok(Async::Ready(stream)) => break stream, From aa1e5874330f184784e8d180d9a8f73909436914 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 27 Dec 2018 13:51:39 -0500 Subject: [PATCH 073/819] Make internal simple query future a stream --- tokio-postgres/src/lib.rs | 8 ++- tokio-postgres/src/proto/client.rs | 6 +- tokio-postgres/src/proto/mod.rs | 2 +- tokio-postgres/src/proto/simple_query.rs | 82 ++++++++++++++---------- tokio-postgres/src/proto/transaction.rs | 34 +++++----- tokio-postgres/src/row.rs | 48 ++++++++++++++ tokio-postgres/src/stmt.rs | 3 +- 7 files changed, 125 insertions(+), 58 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 7bddecfe6..660dc4db7 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -9,7 +9,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; pub use crate::builder::*; pub use crate::error::*; use crate::proto::CancelFuture; -pub use crate::row::{Row, 
RowIndex}; +pub use crate::row::*; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::stmt::Column; @@ -358,14 +358,16 @@ where } #[must_use = "futures do nothing unless polled"] -pub struct BatchExecute(proto::SimpleQueryFuture); +pub struct BatchExecute(proto::SimpleQueryStream); impl Future for BatchExecute { type Item = (); type Error = Error; fn poll(&mut self) -> Poll<(), Error> { - self.0.poll() + while let Some(_) = try_ready!(self.0.poll()) {} + + Ok(Async::Ready(())) } } diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index 54d631c20..f41b5bfe7 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -18,7 +18,7 @@ use crate::proto::idle::{IdleGuard, IdleState}; use crate::proto::portal::Portal; use crate::proto::prepare::PrepareFuture; use crate::proto::query::QueryStream; -use crate::proto::simple_query::SimpleQueryFuture; +use crate::proto::simple_query::SimpleQueryStream; use crate::proto::statement::Statement; use crate::types::{IsNull, Oid, ToSql, Type}; use crate::Error; @@ -121,13 +121,13 @@ impl Client { .map_err(|_| Error::closed()) } - pub fn batch_execute(&self, query: &str) -> SimpleQueryFuture { + pub fn batch_execute(&self, query: &str) -> SimpleQueryStream { let pending = self.pending(|buf| { frontend::query(query, buf).map_err(Error::parse)?; Ok(()) }); - SimpleQueryFuture::new(self.clone(), pending) + SimpleQueryStream::new(self.clone(), pending) } pub fn prepare(&self, name: String, query: &str, param_types: &[Type]) -> PrepareFuture { diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 079deeee6..ed460aabe 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -59,7 +59,7 @@ pub use crate::proto::handshake::HandshakeFuture; pub use crate::proto::portal::Portal; pub use crate::proto::prepare::PrepareFuture; pub use crate::proto::query::QueryStream; -pub use crate::proto::simple_query::SimpleQueryFuture; +pub use crate::proto::simple_query::SimpleQueryStream; pub use crate::proto::statement::Statement; pub use crate::proto::tls::TlsFuture; pub use crate::proto::transaction::TransactionFuture; diff --git a/tokio-postgres/src/proto/simple_query.rs b/tokio-postgres/src/proto/simple_query.rs index d1c279305..519bb0e12 100644 --- a/tokio-postgres/src/proto/simple_query.rs +++ b/tokio-postgres/src/proto/simple_query.rs @@ -1,56 +1,70 @@ use futures::sync::mpsc; -use futures::{Poll, Stream}; +use futures::{Async, Poll, Stream}; use postgres_protocol::message::backend::Message; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; +use std::mem; use crate::proto::client::{Client, PendingRequest}; -use crate::Error; +use crate::{Error, StringRow}; -#[derive(StateMachineFuture)] -pub enum SimpleQuery { - #[state_machine_future(start, transitions(ReadResponse))] +pub enum State { Start { client: Client, request: PendingRequest, }, - #[state_machine_future(transitions(Finished))] - ReadResponse { receiver: mpsc::Receiver }, - #[state_machine_future(ready)] - Finished(()), - #[state_machine_future(error)] - Failed(Error), + ReadResponse { + receiver: mpsc::Receiver, + }, + Done, } -impl PollSimpleQuery for SimpleQuery { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); - let receiver = state.client.send(state.request)?; +pub struct SimpleQueryStream(State); - transition!(ReadResponse { receiver }) - } +impl Stream for SimpleQueryStream { + type Item = 
StringRow; + type Error = Error; - fn poll_read_response<'a>( - state: &'a mut RentToOwn<'a, ReadResponse>, - ) -> Poll { + fn poll(&mut self) -> Poll, Error> { loop { - let message = try_ready_receive!(state.receiver.poll()); + match mem::replace(&mut self.0, State::Done) { + State::Start { client, request } => { + let receiver = client.send(request)?; + self.0 = State::ReadResponse { receiver }; + } + State::ReadResponse { mut receiver } => { + let message = match receiver.poll() { + Ok(Async::Ready(message)) => message, + Ok(Async::NotReady) => { + self.0 = State::ReadResponse { receiver }; + return Ok(Async::NotReady); + } + Err(()) => unreachable!("mpsc receiver can't panic"), + }; - match message { - Some(Message::CommandComplete(_)) - | Some(Message::RowDescription(_)) - | Some(Message::DataRow(_)) - | Some(Message::EmptyQueryResponse) => {} - Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), - Some(Message::ReadyForQuery(_)) => transition!(Finished(())), - Some(_) => return Err(Error::unexpected_message()), - None => return Err(Error::closed()), + match message { + Some(Message::CommandComplete(_)) + | Some(Message::RowDescription(_)) + | Some(Message::EmptyQueryResponse) => { + self.0 = State::ReadResponse { receiver }; + } + Some(Message::DataRow(body)) => { + self.0 = State::ReadResponse { receiver }; + let row = StringRow::new(body)?; + return Ok(Async::Ready(Some(row))); + } + Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), + Some(Message::ReadyForQuery(_)) => return Ok(Async::Ready(None)), + Some(_) => return Err(Error::unexpected_message()), + None => return Err(Error::closed()), + } + } + State::Done => return Ok(Async::Ready(None)), } } } } -impl SimpleQueryFuture { - pub fn new(client: Client, request: PendingRequest) -> SimpleQueryFuture { - SimpleQuery::start(client, request) +impl SimpleQueryStream { + pub fn new(client: Client, request: PendingRequest) -> SimpleQueryStream { + SimpleQueryStream(State::Start { client, request }) } } diff --git a/tokio-postgres/src/proto/transaction.rs b/tokio-postgres/src/proto/transaction.rs index 4f5fa3870..722336415 100644 --- a/tokio-postgres/src/proto/transaction.rs +++ b/tokio-postgres/src/proto/transaction.rs @@ -1,6 +1,6 @@ use crate::proto::client::Client; -use crate::proto::simple_query::SimpleQueryFuture; -use futures::{try_ready, Async, Future, Poll}; +use crate::proto::simple_query::SimpleQueryStream; +use futures::{try_ready, Async, Future, Poll, Stream}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use crate::Error; @@ -16,14 +16,14 @@ where #[state_machine_future(transitions(Running))] Beginning { client: Client, - begin: SimpleQueryFuture, + begin: SimpleQueryStream, future: F, }, #[state_machine_future(transitions(Finishing))] Running { client: Client, future: F }, #[state_machine_future(transitions(Finished))] Finishing { - future: SimpleQueryFuture, + future: SimpleQueryStream, result: Result, }, #[state_machine_future(ready)] @@ -51,7 +51,8 @@ where fn poll_beginning<'a>( state: &'a mut RentToOwn<'a, Beginning>, ) -> Poll, E> { - try_ready!(state.begin.poll()); + while let Some(_) = try_ready!(state.begin.poll()) {} + let state = state.take(); transition!(Running { client: state.client, @@ -78,17 +79,20 @@ where fn poll_finishing<'a>( state: &'a mut RentToOwn<'a, Finishing>, ) -> Poll, E> { - match state.future.poll() { - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(())) => { - let t = state.take().result?; - transition!(Finished(t)) + 
loop { + match state.future.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(Some(_))) => {} + Ok(Async::Ready(None)) => { + let t = state.take().result?; + transition!(Finished(t)) + } + Err(e) => match state.take().result { + Ok(_) => return Err(e.into()), + // prioritize the future's error over the rollback error + Err(e) => return Err(e), + }, } - Err(e) => match state.take().result { - Ok(_) => Err(e.into()), - // prioritize the future's error over the rollback error - Err(e) => Err(e), - }, } } } diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index d63021305..b2f6b69ec 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -2,6 +2,7 @@ use fallible_iterator::FallibleIterator; use postgres_protocol::message::backend::DataRowBody; use std::fmt; use std::ops::Range; +use std::str; use crate::proto; use crate::row::sealed::Sealed; @@ -133,3 +134,50 @@ impl Row { value.map(Some).map_err(Error::from_sql) } } + +pub struct StringRow { + body: DataRowBody, + ranges: Vec>>, +} + +impl StringRow { + #[allow(clippy::new_ret_no_self)] + pub(crate) fn new(body: DataRowBody) -> Result { + let ranges = body.ranges().collect().map_err(Error::parse)?; + Ok(StringRow { body, ranges }) + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn len(&self) -> usize { + self.ranges.len() + } + + pub fn get(&self, idx: usize) -> Option<&str> { + match self.try_get(idx) { + Ok(Some(ok)) => ok, + Err(err) => panic!("error retrieving column {}: {}", idx, err), + Ok(None) => panic!("no such column {}", idx), + } + } + + #[allow(clippy::option_option)] // FIXME + pub fn try_get(&self, idx: usize) -> Result>, Error> { + let buf = match self.ranges.get(idx) { + Some(range) => range.clone().map(|r| &self.body.buffer()[r]), + None => return Ok(None), + }; + + let v = match buf { + Some(buf) => { + let s = str::from_utf8(buf).map_err(|e| Error::from_sql(Box::new(e)))?; + Some(s) + } + None => None, + }; + + Ok(Some(v)) + } +} diff --git a/tokio-postgres/src/stmt.rs b/tokio-postgres/src/stmt.rs index be6cc9f49..bf18dc076 100644 --- a/tokio-postgres/src/stmt.rs +++ b/tokio-postgres/src/stmt.rs @@ -8,8 +8,7 @@ pub struct Column { } impl Column { - #[doc(hidden)] - pub fn new(name: String, type_: Type) -> Column { + pub(crate) fn new(name: String, type_: Type) -> Column { Column { name, type_ } } From e80e1fcaafe03718ce80f45f9fbdc01981364867 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 27 Dec 2018 17:36:04 -0500 Subject: [PATCH 074/819] Don't require passwords to be strings in protocol --- postgres-protocol/src/message/frontend.rs | 36 +++++++++++------------ tokio-postgres/src/proto/handshake.rs | 4 +-- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index 35bc86be5..0ff0ddb65 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -45,7 +45,7 @@ pub enum Message<'a> { param_types: &'a [Oid], }, PasswordMessage { - password: &'a str, + password: &'a [u8], }, Query { query: &'a str, @@ -200,8 +200,8 @@ where buf.push(b'B'); write_body(buf, |buf| { - buf.write_cstr(portal)?; - buf.write_cstr(statement)?; + buf.write_cstr(portal.as_bytes())?; + buf.write_cstr(statement.as_bytes())?; write_counted(formats, |f, buf| buf.write_i16::(f), buf)?; write_counted( values, @@ -249,7 +249,7 @@ pub fn close(variant: u8, name: &str, buf: &mut Vec) -> io::Result<()> { buf.push(b'C'); 
write_body(buf, |buf| { buf.push(variant); - buf.write_cstr(name) + buf.write_cstr(name.as_bytes()) }) } @@ -272,7 +272,7 @@ pub fn copy_done(buf: &mut Vec) { #[inline] pub fn copy_fail(message: &str, buf: &mut Vec) -> io::Result<()> { buf.push(b'f'); - write_body(buf, |buf| buf.write_cstr(message)) + write_body(buf, |buf| buf.write_cstr(message.as_bytes())) } #[inline] @@ -280,7 +280,7 @@ pub fn describe(variant: u8, name: &str, buf: &mut Vec) -> io::Result<()> { buf.push(b'D'); write_body(buf, |buf| { buf.push(variant); - buf.write_cstr(name) + buf.write_cstr(name.as_bytes()) }) } @@ -288,7 +288,7 @@ pub fn describe(variant: u8, name: &str, buf: &mut Vec) -> io::Result<()> { pub fn execute(portal: &str, max_rows: i32, buf: &mut Vec) -> io::Result<()> { buf.push(b'E'); write_body(buf, |buf| { - buf.write_cstr(portal)?; + buf.write_cstr(portal.as_bytes())?; buf.write_i32::(max_rows).unwrap(); Ok(()) }) @@ -301,15 +301,15 @@ where { buf.push(b'P'); write_body(buf, |buf| { - buf.write_cstr(name)?; - buf.write_cstr(query)?; + buf.write_cstr(name.as_bytes())?; + buf.write_cstr(query.as_bytes())?; write_counted(param_types, |t, buf| buf.write_u32::(t), buf)?; Ok(()) }) } #[inline] -pub fn password_message(password: &str, buf: &mut Vec) -> io::Result<()> { +pub fn password_message(password: &[u8], buf: &mut Vec) -> io::Result<()> { buf.push(b'p'); write_body(buf, |buf| buf.write_cstr(password)) } @@ -317,14 +317,14 @@ pub fn password_message(password: &str, buf: &mut Vec) -> io::Result<()> { #[inline] pub fn query(query: &str, buf: &mut Vec) -> io::Result<()> { buf.push(b'Q'); - write_body(buf, |buf| buf.write_cstr(query)) + write_body(buf, |buf| buf.write_cstr(query.as_bytes())) } #[inline] pub fn sasl_initial_response(mechanism: &str, data: &[u8], buf: &mut Vec) -> io::Result<()> { buf.push(b'p'); write_body(buf, |buf| { - buf.write_cstr(mechanism)?; + buf.write_cstr(mechanism.as_bytes())?; let len = i32::from_usize(data.len())?; buf.write_i32::(len)?; buf.extend_from_slice(data); @@ -354,8 +354,8 @@ where write_body(buf, |buf| { buf.write_i32::(196_608).unwrap(); for (key, value) in parameters { - buf.write_cstr(key)?; - buf.write_cstr(value)?; + buf.write_cstr(key.as_bytes())?; + buf.write_cstr(value.as_bytes())?; } buf.push(0); Ok(()) @@ -375,19 +375,19 @@ pub fn terminate(buf: &mut Vec) { } trait WriteCStr { - fn write_cstr(&mut self, s: &str) -> Result<(), io::Error>; + fn write_cstr(&mut self, s: &[u8]) -> Result<(), io::Error>; } impl WriteCStr for Vec { #[inline] - fn write_cstr(&mut self, s: &str) -> Result<(), io::Error> { - if s.as_bytes().contains(&0) { + fn write_cstr(&mut self, s: &[u8]) -> Result<(), io::Error> { + if s.contains(&0) { return Err(io::Error::new( io::ErrorKind::InvalidInput, "string contains embedded null", )); } - self.extend_from_slice(s.as_bytes()); + self.extend_from_slice(s); self.push(0); Ok(()) } diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs index 3089ffc11..3f7127b49 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -134,7 +134,7 @@ where Some(Message::AuthenticationCleartextPassword) => { let pass = state.password.ok_or_else(Error::missing_password)?; let mut buf = vec![]; - frontend::password_message(&pass, &mut buf).map_err(Error::encode)?; + frontend::password_message(pass.as_bytes(), &mut buf).map_err(Error::encode)?; transition!(SendingPassword { future: state.stream.send(buf) }) @@ -144,7 +144,7 @@ where let output = 
authentication::md5_hash(state.user.as_bytes(), pass.as_bytes(), body.salt()); let mut buf = vec![]; - frontend::password_message(&output, &mut buf).map_err(Error::encode)?; + frontend::password_message(output.as_bytes(), &mut buf).map_err(Error::encode)?; transition!(SendingPassword { future: state.stream.send(buf) }) From 635e6381b37f7dd59c97a1ede39b10a49ce458f6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 13:51:30 -0500 Subject: [PATCH 075/819] A less stringy builder This allows us to support things like non-utf8 passwords and unix socket directories. --- postgres/src/builder.rs | 34 +++++++ tokio-postgres-native-tls/src/test.rs | 17 ++-- tokio-postgres-openssl/src/test.rs | 17 ++-- tokio-postgres/src/builder.rs | 121 +++++++++++++++++++---- tokio-postgres/src/proto/connect.rs | 98 ++++++++---------- tokio-postgres/src/proto/connect_once.rs | 94 ++++++++---------- tokio-postgres/src/proto/handshake.rs | 76 +++++++------- tokio-postgres/tests/test/parse.rs | 22 ++--- 8 files changed, 278 insertions(+), 201 deletions(-) diff --git a/postgres/src/builder.rs b/postgres/src/builder.rs index 43f35e6dd..4eb755268 100644 --- a/postgres/src/builder.rs +++ b/postgres/src/builder.rs @@ -1,7 +1,9 @@ use futures::sync::oneshot; use futures::Future; use log::error; +use std::path::Path; use std::str::FromStr; +use std::time::Duration; use tokio_postgres::{Error, MakeTlsMode, Socket, TlsMode}; use crate::{Client, RUNTIME}; @@ -19,11 +21,43 @@ impl Builder { Builder(tokio_postgres::Builder::new()) } + pub fn host(&mut self, host: &str) -> &mut Builder { + self.0.host(host); + self + } + + #[cfg(unix)] + pub fn host_path(&mut self, host: T) -> &mut Builder + where + T: AsRef, + { + self.0.host_path(host); + self + } + + pub fn port(&mut self, port: u16) -> &mut Builder { + self.0.port(port); + self + } + pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { self.0.param(key, value); self } + pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Builder { + self.0.connect_timeout(connect_timeout); + self + } + + pub fn password(&mut self, password: T) -> &mut Builder + where + T: AsRef<[u8]>, + { + self.0.password(password); + self + } + pub fn connect(&self, tls_mode: T) -> Result where T: MakeTlsMode + 'static + Send, diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index 8e21bf0d1..78e7852e2 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -6,13 +6,15 @@ use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; use crate::TlsConnector; -fn smoke_test(builder: &tokio_postgres::Builder, tls: T) +fn smoke_test(s: &str, tls: T) where T: TlsMode, T::Stream: 'static, { let mut runtime = Runtime::new().unwrap(); + let builder = s.parse::().unwrap(); + let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) .and_then(|s| builder.handshake(s, tls)); @@ -42,9 +44,7 @@ fn require() { .build() .unwrap(); smoke_test( - tokio_postgres::Builder::new() - .user("ssl_user") - .dbname("postgres"), + "user=ssl_user dbname=postgres", RequireTls(TlsConnector::with_connector(connector, "localhost")), ); } @@ -58,9 +58,7 @@ fn prefer() { .build() .unwrap(); smoke_test( - tokio_postgres::Builder::new() - .user("ssl_user") - .dbname("postgres"), + "user=ssl_user dbname=postgres", PreferTls(TlsConnector::with_connector(connector, "localhost")), ); } @@ -74,10 +72,7 @@ fn scram_user() { .build() .unwrap(); smoke_test( - 
tokio_postgres::Builder::new() - .user("scram_user") - .password("password") - .dbname("postgres"), + "user=scram_user password=password dbname=postgres", RequireTls(TlsConnector::with_connector(connector, "localhost")), ); } diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index 28506735a..a85cc534f 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -6,13 +6,15 @@ use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; use super::*; -fn smoke_test(builder: &tokio_postgres::Builder, tls: T) +fn smoke_test(s: &str, tls: T) where T: TlsMode, T::Stream: 'static, { let mut runtime = Runtime::new().unwrap(); + let builder = s.parse::().unwrap(); + let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) .and_then(|s| builder.handshake(s, tls)); @@ -39,9 +41,7 @@ fn require() { builder.set_ca_file("../test/server.crt").unwrap(); let ctx = builder.build(); smoke_test( - tokio_postgres::Builder::new() - .user("ssl_user") - .dbname("postgres"), + "user=ssl_user dbname=postgres", RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } @@ -52,9 +52,7 @@ fn prefer() { builder.set_ca_file("../test/server.crt").unwrap(); let ctx = builder.build(); smoke_test( - tokio_postgres::Builder::new() - .user("ssl_user") - .dbname("postgres"), + "user=ssl_user dbname=postgres", PreferTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } @@ -65,10 +63,7 @@ fn scram_user() { builder.set_ca_file("../test/server.crt").unwrap(); let ctx = builder.build(); smoke_test( - tokio_postgres::Builder::new() - .user("scram_user") - .password("password") - .dbname("postgres"), + "user=scram_user password=password dbname=postgres", RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), ); } diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index 69d999bed..ce3792fc5 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -1,6 +1,10 @@ use std::collections::hash_map::{self, HashMap}; use std::iter; +#[cfg(all(feature = "runtime", unix))] +use std::path::{Path, PathBuf}; use std::str::{self, FromStr}; +#[cfg(feature = "runtime")] +use std::time::Duration; use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "runtime")] @@ -10,9 +14,24 @@ use crate::proto::HandshakeFuture; use crate::{Connect, MakeTlsMode, Socket}; use crate::{Error, Handshake, TlsMode}; -#[derive(Clone)] +#[cfg(feature = "runtime")] +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum Host { + Tcp(String), + #[cfg(unix)] + Unix(PathBuf), +} + +#[derive(Debug, Clone, PartialEq)] pub struct Builder { - params: HashMap, + pub(crate) params: HashMap, + pub(crate) password: Option>, + #[cfg(feature = "runtime")] + pub(crate) host: Vec, + #[cfg(feature = "runtime")] + pub(crate) port: Vec, + #[cfg(feature = "runtime")] + pub(crate) connect_timeout: Option, } impl Default for Builder { @@ -27,29 +46,64 @@ impl Builder { params.insert("client_encoding".to_string(), "UTF8".to_string()); params.insert("timezone".to_string(), "GMT".to_string()); - Builder { params } + Builder { + params, + password: None, + #[cfg(feature = "runtime")] + host: vec![], + #[cfg(feature = "runtime")] + port: vec![], + #[cfg(feature = "runtime")] + connect_timeout: None, + } } - pub fn user(&mut self, user: &str) -> &mut Builder { - self.param("user", user) + #[cfg(feature = "runtime")] + pub fn host(&mut self, host: &str) -> &mut Builder { + #[cfg(unix)] + { + if 
host.starts_with('/') { + self.host.push(Host::Unix(PathBuf::from(host))); + return self; + } + } + + self.host.push(Host::Tcp(host.to_string())); + self } - pub fn dbname(&mut self, database: &str) -> &mut Builder { - self.param("dbname", database) + #[cfg(all(feature = "runtime", unix))] + pub fn host_path(&mut self, host: T) -> &mut Builder + where + T: AsRef, + { + self.host.push(Host::Unix(host.as_ref().to_path_buf())); + self } - pub fn password(&mut self, password: &str) -> &mut Builder { - self.param("password", password) + #[cfg(feature = "runtime")] + pub fn port(&mut self, port: u16) -> &mut Builder { + self.port.push(port); + self } - pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { - self.params.insert(key.to_string(), value.to_string()); + #[cfg(feature = "runtime")] + pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Builder { + self.connect_timeout = Some(connect_timeout); self } - /// FIXME do we want this? - pub fn iter(&self) -> Iter<'_> { - Iter(self.params.iter()) + pub fn password(&mut self, password: T) -> &mut Builder + where + T: AsRef<[u8]>, + { + self.password = Some(password.as_ref().to_vec()); + self + } + + pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { + self.params.insert(key.to_string(), value.to_string()); + self } pub fn handshake(&self, stream: S, tls_mode: T) -> Handshake @@ -57,7 +111,7 @@ impl Builder { S: AsyncRead + AsyncWrite, T: TlsMode, { - Handshake(HandshakeFuture::new(stream, tls_mode, self.params.clone())) + Handshake(HandshakeFuture::new(stream, tls_mode, self.clone())) } #[cfg(feature = "runtime")] @@ -65,7 +119,7 @@ impl Builder { where T: MakeTlsMode, { - Connect(ConnectFuture::new(make_tls_mode, self.params.clone())) + Connect(ConnectFuture::new(make_tls_mode, self.clone())) } } @@ -77,7 +131,40 @@ impl FromStr for Builder { let mut builder = Builder::new(); while let Some((key, value)) = parser.parameter()? { - builder.params.insert(key.to_string(), value); + match key { + "password" => { + builder.password(value); + } + #[cfg(feature = "runtime")] + "host" => { + for host in value.split(',') { + builder.host(host); + } + } + #[cfg(feature = "runtime")] + "port" => { + for port in value.split(',') { + let port = if port.is_empty() { + 5432 + } else { + port.parse().map_err(Error::invalid_port)? 
+ }; + builder.port(port); + } + } + #[cfg(feature = "runtime")] + "connect_timeout" => { + let timeout = value + .parse::() + .map_err(Error::invalid_connect_timeout)?; + if timeout > 0 { + builder.connect_timeout(Duration::from_secs(timeout as u64)); + } + } + key => { + builder.param(key, &value); + } + } } Ok(builder) diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index 167a6a429..512178ccb 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -1,10 +1,8 @@ use futures::{try_ready, Async, Future, Poll}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::collections::HashMap; -use std::vec; use crate::proto::{Client, ConnectOnceFuture, Connection}; -use crate::{Error, MakeTlsMode, Socket}; +use crate::{Builder, Error, Host, MakeTlsMode, Socket}; #[derive(StateMachineFuture)] pub enum Connect @@ -12,25 +10,20 @@ where T: MakeTlsMode, { #[state_machine_future(start, transitions(MakingTlsMode))] - Start { - make_tls_mode: T, - params: HashMap, - }, + Start { make_tls_mode: T, config: Builder }, #[state_machine_future(transitions(Connecting))] MakingTlsMode { future: T::Future, - host: String, - port: u16, - addrs: vec::IntoIter<(String, u16)>, + idx: usize, make_tls_mode: T, - params: HashMap, + config: Builder, }, #[state_machine_future(transitions(MakingTlsMode, Finished))] Connecting { future: ConnectOnceFuture, - addrs: vec::IntoIter<(String, u16)>, + idx: usize, make_tls_mode: T, - params: HashMap, + config: Builder, }, #[state_machine_future(ready)] Finished((Client, Connection)), @@ -45,47 +38,27 @@ where fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let mut state = state.take(); - let host = match state.params.remove("host") { - Some(host) => host, - None => return Err(Error::missing_host()), - }; - let mut addrs = host - .split(',') - .map(|s| (s.to_string(), 0u16)) - .collect::>(); - - let port = state.params.remove("port").unwrap_or_else(String::new); - let mut ports = port - .split(',') - .map(|s| { - if s.is_empty() { - Ok(5432) - } else { - s.parse::().map_err(Error::invalid_port) - } - }) - .collect::, _>>()?; - if ports.len() == 1 { - ports.resize(addrs.len(), ports[0]); - } - if addrs.len() != ports.len() { - return Err(Error::invalid_port_count()); + if state.config.host.is_empty() { + return Err(Error::missing_host()); } - for (addr, port) in addrs.iter_mut().zip(ports) { - addr.1 = port; + if state.config.port.len() > 1 && state.config.port.len() != state.config.host.len() { + return Err(Error::invalid_port_count()); } - let mut addrs = addrs.into_iter(); - let (host, port) = addrs.next().expect("addrs cannot be empty"); + let hostname = match &state.config.host[0] { + Host::Tcp(host) => &**host, + // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter + #[cfg(unix)] + Host::Unix(_) => "", + }; + let future = state.make_tls_mode.make_tls_mode(hostname); transition!(MakingTlsMode { - future: state.make_tls_mode.make_tls_mode(&host), - host, - port, - addrs, + future, + idx: 0, make_tls_mode: state.make_tls_mode, - params: state.params, + config: state.config, }) } @@ -96,10 +69,10 @@ where let state = state.take(); transition!(Connecting { - future: ConnectOnceFuture::new(state.host, state.port, tls_mode, state.params.clone()), - addrs: state.addrs, + future: ConnectOnceFuture::new(state.idx, tls_mode, state.config.clone()), + idx: state.idx, make_tls_mode: state.make_tls_mode, - params: state.params, + 
config: state.config, }) } @@ -111,18 +84,25 @@ where Ok(Async::NotReady) => Ok(Async::NotReady), Err(e) => { let mut state = state.take(); - let (host, port) = match state.addrs.next() { - Some(addr) => addr, + let idx = state.idx + 1; + + let host = match state.config.host.get(idx) { + Some(host) => host, None => return Err(e), }; + let hostname = match host { + Host::Tcp(host) => &**host, + #[cfg(unix)] + Host::Unix(_) => "", + }; + let future = state.make_tls_mode.make_tls_mode(hostname); + transition!(MakingTlsMode { - future: state.make_tls_mode.make_tls_mode(&host), - host, - port, - addrs: state.addrs, + future, + idx, make_tls_mode: state.make_tls_mode, - params: state.params, + config: state.config, }) } } @@ -133,7 +113,7 @@ impl ConnectFuture where T: MakeTlsMode, { - pub fn new(make_tls_mode: T, params: HashMap) -> ConnectFuture { - Connect::start(make_tls_mode, params) + pub fn new(make_tls_mode: T, config: Builder) -> ConnectFuture { + Connect::start(make_tls_mode, config) } } diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 4dba5c3ca..da0f5778f 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -1,13 +1,14 @@ +#![allow(clippy::large_enum_variant)] + use futures::{try_ready, Async, Future, Poll}; use futures_cpupool::{CpuFuture, CpuPool}; use lazy_static::lazy_static; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::collections::HashMap; use std::io; use std::net::{SocketAddr, ToSocketAddrs}; #[cfg(unix)] use std::path::Path; -use std::time::{Duration, Instant}; +use std::time::Instant; use std::vec; use tokio_tcp::TcpStream; use tokio_timer::Delay; @@ -15,7 +16,7 @@ use tokio_timer::Delay; use tokio_uds::UnixStream; use crate::proto::{Client, Connection, HandshakeFuture}; -use crate::{Error, Socket, TlsMode}; +use crate::{Builder, Error, Host, Socket, TlsMode}; lazy_static! 
{ static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() @@ -33,36 +34,32 @@ where #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))] #[cfg_attr(not(unix), state_machine_future(transitions(ConnectingTcp)))] Start { - host: String, - port: u16, + idx: usize, tls_mode: T, - params: HashMap, + config: Builder, }, #[cfg(unix)] #[state_machine_future(transitions(Handshaking))] ConnectingUnix { future: tokio_uds::ConnectFuture, - connect_timeout: Option, timeout: Option, tls_mode: T, - params: HashMap, + config: Builder, }, #[state_machine_future(transitions(ConnectingTcp))] ResolvingDns { future: CpuFuture, io::Error>, - connect_timeout: Option, timeout: Option, tls_mode: T, - params: HashMap, + config: Builder, }, #[state_machine_future(transitions(Handshaking))] ConnectingTcp { future: tokio_tcp::ConnectFuture, addrs: vec::IntoIter, - connect_timeout: Option, timeout: Option, tls_mode: T, - params: HashMap, + config: Builder, }, #[state_machine_future(transitions(Finished))] Handshaking { future: HandshakeFuture }, @@ -77,44 +74,41 @@ where T: TlsMode, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let mut state = state.take(); - - let connect_timeout = match state.params.remove("connect_timeout") { - Some(s) => { - let seconds = s.parse::().map_err(Error::invalid_connect_timeout)?; - if seconds <= 0 { - None - } else { - Some(Duration::from_secs(seconds as u64)) - } - } - None => None, - }; - let timeout = connect_timeout.map(|d| Delay::new(Instant::now() + d)); + let state = state.take(); - #[cfg(unix)] - { - if state.host.starts_with('/') { - let path = Path::new(&state.host).join(format!(".s.PGSQL.{}", state.port)); + let port = *state + .config + .port + .get(state.idx) + .or_else(|| state.config.port.get(0)) + .unwrap_or(&5432); + + let timeout = state + .config + .connect_timeout + .map(|d| Delay::new(Instant::now() + d)); + + match &state.config.host[state.idx] { + Host::Tcp(host) => { + let host = host.clone(); + transition!(ResolvingDns { + future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()), + timeout, + tls_mode: state.tls_mode, + config: state.config, + }) + } + #[cfg(unix)] + Host::Unix(host) => { + let path = Path::new(host).join(format!(".s.PGSQL.{}", port)); transition!(ConnectingUnix { future: UnixStream::connect(path), - connect_timeout, timeout, tls_mode: state.tls_mode, - params: state.params, + config: state.config, }) } } - - let host = state.host; - let port = state.port; - transition!(ResolvingDns { - future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()), - connect_timeout, - timeout, - tls_mode: state.tls_mode, - params: state.params, - }) } #[cfg(unix)] @@ -134,7 +128,7 @@ where let state = state.take(); transition!(Handshaking { - future: HandshakeFuture::new(stream, state.tls_mode, state.params) + future: HandshakeFuture::new(stream, state.tls_mode, state.config) }) } @@ -165,10 +159,9 @@ where transition!(ConnectingTcp { future: TcpStream::connect(&addr), addrs, - connect_timeout: state.connect_timeout, timeout: state.timeout, tls_mode: state.tls_mode, - params: state.params, + config: state.config, }) } @@ -202,7 +195,7 @@ where let stream = Socket::new_tcp(stream); transition!(Handshaking { - future: HandshakeFuture::new(stream, state.tls_mode, state.params), + future: HandshakeFuture::new(stream, state.tls_mode, state.config), }) } @@ -219,12 +212,7 @@ impl ConnectOnceFuture where T: TlsMode, { - pub fn new( - host: String, - port: u16, - tls_mode: T, - params: 
HashMap, - ) -> ConnectOnceFuture { - ConnectOnce::start(host, port, tls_mode, params) + pub fn new(idx: usize, tls_mode: T, config: Builder) -> ConnectOnceFuture { + ConnectOnce::start(idx, tls_mode, config) } } diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs index 3f7127b49..79bbb5134 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -13,7 +13,7 @@ use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::{Client, Connection, PostgresCodec, TlsFuture}; -use crate::{CancelData, ChannelBinding, Error, TlsMode}; +use crate::{Builder, CancelData, ChannelBinding, Error, TlsMode}; #[derive(StateMachineFuture)] pub enum Handshake @@ -24,20 +24,18 @@ where #[state_machine_future(start, transitions(SendingStartup))] Start { future: TlsFuture, - params: HashMap, + config: Builder, }, #[state_machine_future(transitions(ReadingAuth))] SendingStartup { future: sink::Send>, - user: String, - password: Option, + config: Builder, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingInfo, SendingPassword, SendingSasl))] ReadingAuth { stream: Framed, - user: String, - password: Option, + config: Builder, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingAuthCompletion))] @@ -77,31 +75,24 @@ where { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let (stream, channel_binding) = try_ready!(state.future.poll()); - let mut state = state.take(); - - // we don't want to send the password as a param - let password = state.params.remove("password"); - - // libpq uses the parameter "dbname" but the protocol expects "database" (!?!) - if let Some(dbname) = state.params.remove("dbname") { - state.params.insert("database".to_string(), dbname); - } + let state = state.take(); let mut buf = vec![]; - frontend::startup_message(state.params.iter().map(|(k, v)| (&**k, &**v)), &mut buf) - .map_err(Error::encode)?; + frontend::startup_message( + state.config.params.iter().map(|(k, v)| { + // libpq uses dbname, but the backend expects database (!) 
+ let k = if k == "dbname" { "database" } else { &**k }; + (k, &**v) + }), + &mut buf, + ) + .map_err(Error::encode)?; let stream = Framed::new(stream, PostgresCodec); - let user = state - .params - .remove("user") - .ok_or_else(Error::missing_user)?; - transition!(SendingStartup { future: stream.send(buf), - user, - password, + config: state.config, channel_binding, }) } @@ -113,8 +104,7 @@ where let state = state.take(); transition!(ReadingAuth { stream, - user: state.user, - password: state.password, + config: state.config, channel_binding: state.channel_binding, }) } @@ -132,17 +122,29 @@ where parameters: HashMap::new(), }), Some(Message::AuthenticationCleartextPassword) => { - let pass = state.password.ok_or_else(Error::missing_password)?; + let pass = state + .config + .password + .as_ref() + .ok_or_else(Error::missing_password)?; let mut buf = vec![]; - frontend::password_message(pass.as_bytes(), &mut buf).map_err(Error::encode)?; + frontend::password_message(pass, &mut buf).map_err(Error::encode)?; transition!(SendingPassword { future: state.stream.send(buf) }) } Some(Message::AuthenticationMd5Password(body)) => { - let pass = state.password.ok_or_else(Error::missing_password)?; - let output = - authentication::md5_hash(state.user.as_bytes(), pass.as_bytes(), body.salt()); + let user = state + .config + .params + .get("user") + .ok_or_else(Error::missing_user)?; + let pass = state + .config + .password + .as_ref() + .ok_or_else(Error::missing_password)?; + let output = authentication::md5_hash(user.as_bytes(), pass, body.salt()); let mut buf = vec![]; frontend::password_message(output.as_bytes(), &mut buf).map_err(Error::encode)?; transition!(SendingPassword { @@ -150,7 +152,11 @@ where }) } Some(Message::AuthenticationSasl(body)) => { - let pass = state.password.ok_or_else(Error::missing_password)?; + let pass = state + .config + .password + .as_ref() + .ok_or_else(Error::missing_password)?; let mut has_scram = false; let mut has_scram_plus = false; @@ -187,7 +193,7 @@ where return Err(Error::unsupported_authentication()); }; - let scram = ScramSha256::new(pass.as_bytes(), channel_binding); + let scram = ScramSha256::new(pass, channel_binding); let mut buf = vec![]; frontend::sasl_initial_response(mechanism, scram.message(), &mut buf) @@ -324,7 +330,7 @@ where S: AsyncRead + AsyncWrite, T: TlsMode, { - pub fn new(stream: S, tls_mode: T, params: HashMap) -> HandshakeFuture { - Handshake::start(TlsFuture::new(stream, tls_mode), params) + pub fn new(stream: S, tls_mode: T, config: Builder) -> HandshakeFuture { + Handshake::start(TlsFuture::new(stream, tls_mode), config) } } diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index 236d48703..c5d2e0fa3 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,18 +1,14 @@ -use std::collections::HashMap; - #[test] fn pairs_ok() { let params = r"user=foo password=' fizz \'buzz\\ ' thing = ''" .parse::() .unwrap(); - let params = params.iter().collect::>(); - let mut expected = HashMap::new(); - expected.insert("user", "foo"); - expected.insert("password", r" fizz 'buzz\ "); - expected.insert("thing", ""); - expected.insert("client_encoding", "UTF8"); - expected.insert("timezone", "GMT"); + let mut expected = tokio_postgres::Builder::new(); + expected + .param("user", "foo") + .password(r" fizz 'buzz\ ") + .param("thing", ""); assert_eq!(params, expected); } @@ -22,13 +18,9 @@ fn pairs_ws() { let params = " user\t=\r\n\x0bfoo \t password = hunter2 " .parse::() 
.unwrap();; - let params = params.iter().collect::>(); - let mut expected = HashMap::new(); - expected.insert("user", "foo"); - expected.insert("password", r"hunter2"); - expected.insert("client_encoding", "UTF8"); - expected.insert("timezone", "GMT"); + let mut expected = tokio_postgres::Builder::new(); + expected.param("user", "foo").password("hunter2"); assert_eq!(params, expected); } From 540bcc5556eef6eb900627fe8ddb814767d1a62d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 13:55:09 -0500 Subject: [PATCH 076/819] Remove unused type --- tokio-postgres/src/builder.rs | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index ce3792fc5..91019156a 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -1,4 +1,4 @@ -use std::collections::hash_map::{self, HashMap}; +use std::collections::HashMap; use std::iter; #[cfg(all(feature = "runtime", unix))] use std::path::{Path, PathBuf}; @@ -171,23 +171,6 @@ impl FromStr for Builder { } } -#[derive(Debug, Clone)] -pub struct Iter<'a>(hash_map::Iter<'a, String, String>); - -impl<'a> Iterator for Iter<'a> { - type Item = (&'a str, &'a str); - - fn next(&mut self) -> Option<(&'a str, &'a str)> { - self.0.next().map(|(k, v)| (&**k, &**v)) - } -} - -impl<'a> ExactSizeIterator for Iter<'a> { - fn len(&self) -> usize { - self.0.len() - } -} - struct Parser<'a> { s: &'a str, it: iter::Peekable>, From 634d24a95119f717570f50b3f5e2292e42de9613 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 14:16:38 -0500 Subject: [PATCH 077/819] Wrap Builder in an Arc The builder ends up being cloned a couple of times per connection, so use Arc::get_mut to make that faster. 
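A rough sketch of the copy-on-write pattern this relies on (the Settings/Inner
names below are illustrative placeholders, not the crate's types; the actual
change wraps the builder's fields the same way and mutates them through
Arc::make_mut, as the diff below shows):

    use std::sync::Arc;

    #[derive(Clone, Debug, PartialEq)]
    struct Inner {
        hosts: Vec<String>,
        password: Option<Vec<u8>>,
    }

    // Cloning Settings only bumps a reference count. The first mutation of a
    // shared handle copies Inner once; after that, Arc::make_mut hands back a
    // plain &mut Inner with no further copying.
    #[derive(Clone, Debug, PartialEq)]
    struct Settings(Arc<Inner>);

    impl Settings {
        fn new() -> Settings {
            Settings(Arc::new(Inner { hosts: vec![], password: None }))
        }

        fn host(&mut self, host: &str) -> &mut Settings {
            Arc::make_mut(&mut self.0).hosts.push(host.to_string());
            self
        }
    }

    fn main() {
        let mut a = Settings::new();
        let b = a.clone();          // cheap: shares the same Inner allocation
        a.host("db.example.com");   // copies Inner exactly once; b is unchanged
        assert_ne!(a, b);
    }
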
--- tokio-postgres/src/builder.rs | 32 ++++++++++++++++-------- tokio-postgres/src/proto/connect.rs | 8 +++--- tokio-postgres/src/proto/connect_once.rs | 6 +++-- tokio-postgres/src/proto/handshake.rs | 6 ++++- 4 files changed, 35 insertions(+), 17 deletions(-) diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index 91019156a..18221fbdd 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -3,6 +3,7 @@ use std::iter; #[cfg(all(feature = "runtime", unix))] use std::path::{Path, PathBuf}; use std::str::{self, FromStr}; +use std::sync::Arc; #[cfg(feature = "runtime")] use std::time::Duration; use tokio_io::{AsyncRead, AsyncWrite}; @@ -23,7 +24,7 @@ pub(crate) enum Host { } #[derive(Debug, Clone, PartialEq)] -pub struct Builder { +pub(crate) struct Inner { pub(crate) params: HashMap, pub(crate) password: Option>, #[cfg(feature = "runtime")] @@ -34,6 +35,9 @@ pub struct Builder { pub(crate) connect_timeout: Option, } +#[derive(Debug, Clone, PartialEq)] +pub struct Builder(pub(crate) Arc); + impl Default for Builder { fn default() -> Builder { Builder::new() @@ -46,7 +50,7 @@ impl Builder { params.insert("client_encoding".to_string(), "UTF8".to_string()); params.insert("timezone".to_string(), "GMT".to_string()); - Builder { + Builder(Arc::new(Inner { params, password: None, #[cfg(feature = "runtime")] @@ -55,7 +59,7 @@ impl Builder { port: vec![], #[cfg(feature = "runtime")] connect_timeout: None, - } + })) } #[cfg(feature = "runtime")] @@ -63,12 +67,16 @@ impl Builder { #[cfg(unix)] { if host.starts_with('/') { - self.host.push(Host::Unix(PathBuf::from(host))); + Arc::make_mut(&mut self.0) + .host + .push(Host::Unix(PathBuf::from(host))); return self; } } - self.host.push(Host::Tcp(host.to_string())); + Arc::make_mut(&mut self.0) + .host + .push(Host::Tcp(host.to_string())); self } @@ -77,19 +85,21 @@ impl Builder { where T: AsRef, { - self.host.push(Host::Unix(host.as_ref().to_path_buf())); + Arc::make_mut(&mut self.0) + .host + .push(Host::Unix(host.as_ref().to_path_buf())); self } #[cfg(feature = "runtime")] pub fn port(&mut self, port: u16) -> &mut Builder { - self.port.push(port); + Arc::make_mut(&mut self.0).port.push(port); self } #[cfg(feature = "runtime")] pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Builder { - self.connect_timeout = Some(connect_timeout); + Arc::make_mut(&mut self.0).connect_timeout = Some(connect_timeout); self } @@ -97,12 +107,14 @@ impl Builder { where T: AsRef<[u8]>, { - self.password = Some(password.as_ref().to_vec()); + Arc::make_mut(&mut self.0).password = Some(password.as_ref().to_vec()); self } pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { - self.params.insert(key.to_string(), value.to_string()); + Arc::make_mut(&mut self.0) + .params + .insert(key.to_string(), value.to_string()); self } diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index 512178ccb..f99cd87c6 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -38,15 +38,15 @@ where fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let mut state = state.take(); - if state.config.host.is_empty() { + if state.config.0.host.is_empty() { return Err(Error::missing_host()); } - if state.config.port.len() > 1 && state.config.port.len() != state.config.host.len() { + if state.config.0.port.len() > 1 && state.config.0.port.len() != state.config.0.host.len() { return Err(Error::invalid_port_count()); } - let hostname = 
match &state.config.host[0] { + let hostname = match &state.config.0.host[0] { Host::Tcp(host) => &**host, // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] @@ -86,7 +86,7 @@ where let mut state = state.take(); let idx = state.idx + 1; - let host = match state.config.host.get(idx) { + let host = match state.config.0.host.get(idx) { Some(host) => host, None => return Err(e), }; diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index da0f5778f..70471bc68 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -78,17 +78,19 @@ where let port = *state .config + .0 .port .get(state.idx) - .or_else(|| state.config.port.get(0)) + .or_else(|| state.config.0.port.get(0)) .unwrap_or(&5432); let timeout = state .config + .0 .connect_timeout .map(|d| Delay::new(Instant::now() + d)); - match &state.config.host[state.idx] { + match &state.config.0.host[state.idx] { Host::Tcp(host) => { let host = host.clone(); transition!(ResolvingDns { diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs index 79bbb5134..1da91ff01 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -79,7 +79,7 @@ where let mut buf = vec![]; frontend::startup_message( - state.config.params.iter().map(|(k, v)| { + state.config.0.params.iter().map(|(k, v)| { // libpq uses dbname, but the backend expects database (!) let k = if k == "dbname" { "database" } else { &**k }; (k, &**v) @@ -124,6 +124,7 @@ where Some(Message::AuthenticationCleartextPassword) => { let pass = state .config + .0 .password .as_ref() .ok_or_else(Error::missing_password)?; @@ -136,11 +137,13 @@ where Some(Message::AuthenticationMd5Password(body)) => { let user = state .config + .0 .params .get("user") .ok_or_else(Error::missing_user)?; let pass = state .config + .0 .password .as_ref() .ok_or_else(Error::missing_password)?; @@ -154,6 +157,7 @@ where Some(Message::AuthenticationSasl(body)) => { let pass = state .config + .0 .password .as_ref() .ok_or_else(Error::missing_password)?; From 23552b44a488cc17acfb7153b755b1e3a012fa2b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 14:33:27 -0500 Subject: [PATCH 078/819] Simplify unix path handling a bit --- tokio-postgres/src/builder.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index 18221fbdd..c64ad7290 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -67,10 +67,7 @@ impl Builder { #[cfg(unix)] { if host.starts_with('/') { - Arc::make_mut(&mut self.0) - .host - .push(Host::Unix(PathBuf::from(host))); - return self; + return self.host_path(host); } } From 919fa52a5e33b908864d97e98ce97a044d3c6163 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 15:59:17 -0500 Subject: [PATCH 079/819] Add bind and query_portal to sync API --- postgres/src/lib.rs | 2 ++ postgres/src/portal.rs | 1 + postgres/src/test.rs | 30 ++++++++++++++++++++++++++++++ postgres/src/transaction.rs | 23 ++++++++++++++++++++++- 4 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 postgres/src/portal.rs diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 12967cdc9..d555e8210 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -6,6 +6,7 @@ use tokio::runtime::{self, Runtime}; #[cfg(feature = "runtime")] mod builder; mod client; +mod portal; mod 
query; mod statement; mod transaction; @@ -17,6 +18,7 @@ mod test; #[cfg(feature = "runtime")] pub use crate::builder::*; pub use crate::client::*; +pub use crate::portal::*; pub use crate::query::*; pub use crate::statement::*; pub use crate::transaction::*; diff --git a/postgres/src/portal.rs b/postgres/src/portal.rs new file mode 100644 index 000000000..cf22e5755 --- /dev/null +++ b/postgres/src/portal.rs @@ -0,0 +1 @@ +pub struct Portal(pub(crate) tokio_postgres::Portal); diff --git a/postgres/src/test.rs b/postgres/src/test.rs index e7bb778a5..e86f243dd 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -198,3 +198,33 @@ fn copy_out() { client.batch_execute("SELECT 1").unwrap(); } + +#[test] +fn portal() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo (id INT); + + INSERT INTO foo (id) VALUES (1), (2), (3); + ", + ) + .unwrap(); + + let mut transaction = client.transaction().unwrap(); + + let portal = transaction + .bind("SELECT * FROM foo ORDER BY id", &[]) + .unwrap(); + + let rows = transaction.query_portal(&portal, 2).unwrap(); + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[1].get::<_, i32>(0), 2); + + let rows = transaction.query_portal(&portal, 2).unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 3); +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 0fa19b9b2..e526d5276 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,8 +1,9 @@ +use futures::{Future, Stream}; use std::io::Read; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row}; -use crate::{Client, CopyOutReader, Query, Statement}; +use crate::{Client, CopyOutReader, Portal, Query, Statement}; pub struct Transaction<'a> { client: &'a mut Client, @@ -73,6 +74,26 @@ impl<'a> Transaction<'a> { self.client.query(query, params) } + pub fn bind(&mut self, query: &T, params: &[&dyn ToSql]) -> Result + where + T: ?Sized + Query, + { + let statement = query.__statement(&mut self.client)?; + self.client + .get_mut() + .bind(&statement.0, params) + .wait() + .map(Portal) + } + + pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { + self.client + .get_mut() + .query_portal(&portal.0, max_rows) + .collect() + .wait() + } + pub fn copy_in( &mut self, query: &T, From 45593f5ad0e66bc08789105d4b69bf0a9a434f3b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 20:20:31 -0800 Subject: [PATCH 080/819] Rename Query to ToStatement --- postgres/src/client.rs | 10 +++++----- postgres/src/lib.rs | 4 ++-- postgres/src/{query.rs => to_statement.rs} | 6 +++--- postgres/src/transaction.rs | 12 ++++++------ 4 files changed, 16 insertions(+), 16 deletions(-) rename postgres/src/{query.rs => to_statement.rs} (83%) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 6f0bea565..c2f691d22 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -10,7 +10,7 @@ use tokio_postgres::{MakeTlsMode, Socket, TlsMode}; #[cfg(feature = "runtime")] use crate::Builder; -use crate::{Query, Statement, Transaction}; +use crate::{Statement, ToStatement, Transaction}; pub struct Client(tokio_postgres::Client); @@ -42,7 +42,7 @@ impl Client { pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where - T: ?Sized + Query, + T: ?Sized + ToStatement, { let statement = query.__statement(self)?; 
self.0.execute(&statement.0, params).wait() @@ -50,7 +50,7 @@ impl Client { pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where - T: ?Sized + Query, + T: ?Sized + ToStatement, { let statement = query.__statement(self)?; self.0.query(&statement.0, params).collect().wait() @@ -63,7 +63,7 @@ impl Client { reader: R, ) -> Result where - T: ?Sized + Query, + T: ?Sized + ToStatement, R: Read, { let statement = query.__statement(self)?; @@ -78,7 +78,7 @@ impl Client { params: &[&dyn ToSql], ) -> Result, Error> where - T: ?Sized + Query, + T: ?Sized + ToStatement, { let statement = query.__statement(self)?; let mut stream = self.0.copy_out(&statement.0, params).wait(); diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index d555e8210..197be6985 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -7,8 +7,8 @@ use tokio::runtime::{self, Runtime}; mod builder; mod client; mod portal; -mod query; mod statement; +mod to_statement; mod transaction; #[cfg(feature = "runtime")] @@ -19,8 +19,8 @@ mod test; pub use crate::builder::*; pub use crate::client::*; pub use crate::portal::*; -pub use crate::query::*; pub use crate::statement::*; +pub use crate::to_statement::*; pub use crate::transaction::*; #[cfg(feature = "runtime")] diff --git a/postgres/src/query.rs b/postgres/src/to_statement.rs similarity index 83% rename from postgres/src/query.rs rename to postgres/src/to_statement.rs index 4910fcd0d..2f24d2035 100644 --- a/postgres/src/query.rs +++ b/postgres/src/to_statement.rs @@ -6,14 +6,14 @@ mod sealed { pub trait Sealed {} } -pub trait Query: sealed::Sealed { +pub trait ToStatement: sealed::Sealed { #[doc(hidden)] fn __statement(&self, client: &mut Client) -> Result; } impl sealed::Sealed for str {} -impl Query for str { +impl ToStatement for str { fn __statement(&self, client: &mut Client) -> Result { client.prepare(self) } @@ -21,7 +21,7 @@ impl Query for str { impl sealed::Sealed for Statement {} -impl Query for Statement { +impl ToStatement for Statement { fn __statement(&self, _: &mut Client) -> Result { Ok(self.clone()) } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index e526d5276..2c2e180cb 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -3,7 +3,7 @@ use std::io::Read; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row}; -use crate::{Client, CopyOutReader, Portal, Query, Statement}; +use crate::{Client, CopyOutReader, Portal, Statement, ToStatement}; pub struct Transaction<'a> { client: &'a mut Client, @@ -62,21 +62,21 @@ impl<'a> Transaction<'a> { pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where - T: ?Sized + Query, + T: ?Sized + ToStatement, { self.client.execute(query, params) } pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where - T: ?Sized + Query, + T: ?Sized + ToStatement, { self.client.query(query, params) } pub fn bind(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where - T: ?Sized + Query, + T: ?Sized + ToStatement, { let statement = query.__statement(&mut self.client)?; self.client @@ -101,7 +101,7 @@ impl<'a> Transaction<'a> { reader: R, ) -> Result where - T: ?Sized + Query, + T: ?Sized + ToStatement, R: Read, { self.client.copy_in(query, params, reader) @@ -113,7 +113,7 @@ impl<'a> Transaction<'a> { params: &[&dyn ToSql], ) -> Result, Error> where - T: ?Sized + Query, + T: ?Sized + ToStatement, { self.client.copy_out(query, params) } From 5169820d6a82f2f20bb0aa91981ea00b34ad7e28 Mon Sep 17 
00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 20:39:32 -0800 Subject: [PATCH 081/819] Return iterators from query in sync API --- postgres/Cargo.toml | 1 + postgres/src/client.rs | 8 +++--- postgres/src/lib.rs | 4 +++ postgres/src/query.rs | 36 ++++++++++++++++++++++++ postgres/src/query_portal.rs | 36 ++++++++++++++++++++++++ postgres/src/test.rs | 53 ++++++++++++++++++++++++++++++------ postgres/src/transaction.rs | 22 ++++++++------- 7 files changed, 138 insertions(+), 22 deletions(-) create mode 100644 postgres/src/query.rs create mode 100644 postgres/src/query_portal.rs diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index a59c0794d..2a203e3a6 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -11,6 +11,7 @@ runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] [dependencies] bytes = "0.4" +fallible-iterator = "0.1" futures = "0.1" tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } diff --git a/postgres/src/client.rs b/postgres/src/client.rs index c2f691d22..4ccb9b5a3 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -4,13 +4,13 @@ use futures::{Async, Future, Poll, Stream}; use std::io::{self, BufRead, Cursor, Read}; use std::marker::PhantomData; use tokio_postgres::types::{ToSql, Type}; -use tokio_postgres::{Error, Row}; +use tokio_postgres::Error; #[cfg(feature = "runtime")] use tokio_postgres::{MakeTlsMode, Socket, TlsMode}; #[cfg(feature = "runtime")] use crate::Builder; -use crate::{Statement, ToStatement, Transaction}; +use crate::{Query, Statement, ToStatement, Transaction}; pub struct Client(tokio_postgres::Client); @@ -48,12 +48,12 @@ impl Client { self.0.execute(&statement.0, params).wait() } - pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where T: ?Sized + ToStatement, { let statement = query.__statement(self)?; - self.0.query(&statement.0, params).collect().wait() + Ok(Query::new(self.0.query(&statement.0, params))) } pub fn copy_in( diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 197be6985..638ef26ce 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -7,6 +7,8 @@ use tokio::runtime::{self, Runtime}; mod builder; mod client; mod portal; +mod query; +mod query_portal; mod statement; mod to_statement; mod transaction; @@ -19,6 +21,8 @@ mod test; pub use crate::builder::*; pub use crate::client::*; pub use crate::portal::*; +pub use crate::query::*; +pub use crate::query_portal::*; pub use crate::statement::*; pub use crate::to_statement::*; pub use crate::transaction::*; diff --git a/postgres/src/query.rs b/postgres/src/query.rs new file mode 100644 index 000000000..e7eb7948c --- /dev/null +++ b/postgres/src/query.rs @@ -0,0 +1,36 @@ +use fallible_iterator::FallibleIterator; +use futures::stream::{self, Stream}; +use std::marker::PhantomData; +use tokio_postgres::{Error, Row}; + +pub struct Query<'a> { + it: stream::Wait, + _p: PhantomData<&'a mut ()>, +} + +// no-op impl to extend the borrow until drop +impl<'a> Drop for Query<'a> { + fn drop(&mut self) {} +} + +impl<'a> Query<'a> { + pub(crate) fn new(stream: tokio_postgres::Query) -> Query<'a> { + Query { + it: stream.wait(), + _p: PhantomData, + } + } +} + +impl<'a> FallibleIterator for Query<'a> { + type Item = Row; + type Error = Error; + + fn next(&mut self) -> Result, Error> { + match self.it.next() { + Some(Ok(row)) => Ok(Some(row)), + Some(Err(e)) => Err(e), + None => Ok(None), + } + 
} +} diff --git a/postgres/src/query_portal.rs b/postgres/src/query_portal.rs new file mode 100644 index 000000000..0ed8250a0 --- /dev/null +++ b/postgres/src/query_portal.rs @@ -0,0 +1,36 @@ +use fallible_iterator::FallibleIterator; +use futures::stream::{self, Stream}; +use std::marker::PhantomData; +use tokio_postgres::{Error, Row}; + +pub struct QueryPortal<'a> { + it: stream::Wait, + _p: PhantomData<&'a mut ()>, +} + +// no-op impl to extend the borrow until drop +impl<'a> Drop for QueryPortal<'a> { + fn drop(&mut self) {} +} + +impl<'a> QueryPortal<'a> { + pub(crate) fn new(stream: tokio_postgres::QueryPortal) -> QueryPortal<'a> { + QueryPortal { + it: stream.wait(), + _p: PhantomData, + } + } +} + +impl<'a> FallibleIterator for QueryPortal<'a> { + type Item = Row; + type Error = Error; + + fn next(&mut self) -> Result, Error> { + match self.it.next() { + Some(Ok(row)) => Ok(Some(row)), + Some(Err(e)) => Err(e), + None => Ok(None), + } + } +} diff --git a/postgres/src/test.rs b/postgres/src/test.rs index e86f243dd..ed6242a17 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,3 +1,4 @@ +use fallible_iterator::FallibleIterator; use std::io::Read; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -20,7 +21,11 @@ fn query_prepared() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); let stmt = client.prepare("SELECT $1::TEXT").unwrap(); - let rows = client.query(&stmt, &[&"hello"]).unwrap(); + let rows = client + .query(&stmt, &[&"hello"]) + .unwrap() + .collect::>() + .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "hello"); } @@ -29,7 +34,11 @@ fn query_prepared() { fn query_unprepared() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); - let rows = client.query("SELECT $1::TEXT", &[&"hello"]).unwrap(); + let rows = client + .query("SELECT $1::TEXT", &[&"hello"]) + .unwrap() + .collect::>() + .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "hello"); } @@ -50,7 +59,11 @@ fn transaction_commit() { transaction.commit().unwrap(); - let rows = client.query("SELECT * FROM foo", &[]).unwrap(); + let rows = client + .query("SELECT * FROM foo", &[]) + .unwrap() + .collect::>() + .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); } @@ -71,7 +84,11 @@ fn transaction_rollback() { transaction.rollback().unwrap(); - let rows = client.query("SELECT * FROM foo", &[]).unwrap(); + let rows = client + .query("SELECT * FROM foo", &[]) + .unwrap() + .collect::>() + .unwrap(); assert_eq!(rows.len(), 0); } @@ -91,7 +108,11 @@ fn transaction_drop() { drop(transaction); - let rows = client.query("SELECT * FROM foo", &[]).unwrap(); + let rows = client + .query("SELECT * FROM foo", &[]) + .unwrap() + .collect::>() + .unwrap(); assert_eq!(rows.len(), 0); } @@ -119,6 +140,8 @@ fn nested_transactions() { let rows = transaction .query("SELECT id FROM foo ORDER BY id", &[]) + .unwrap() + .collect::>() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -139,7 +162,11 @@ fn nested_transactions() { transaction3.commit().unwrap(); transaction.commit().unwrap(); - let rows = client.query("SELECT id FROM foo ORDER BY id", &[]).unwrap(); + let rows = client + .query("SELECT id FROM foo ORDER BY id", &[]) + .unwrap() + .collect::>() + .unwrap(); assert_eq!(rows.len(), 3); assert_eq!(rows[0].get::<_, i32>(0), 1); assert_eq!(rows[1].get::<_, i32>(0), 3); @@ -164,6 +191,8 @@ fn copy_in() { let rows = 
client .query("SELECT id, name FROM foo ORDER BY id", &[]) + .unwrap() + .collect::>() .unwrap(); assert_eq!(rows.len(), 2); @@ -219,12 +248,20 @@ fn portal() { .bind("SELECT * FROM foo ORDER BY id", &[]) .unwrap(); - let rows = transaction.query_portal(&portal, 2).unwrap(); + let rows = transaction + .query_portal(&portal, 2) + .unwrap() + .collect::>() + .unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); assert_eq!(rows[1].get::<_, i32>(0), 2); - let rows = transaction.query_portal(&portal, 2).unwrap(); + let rows = transaction + .query_portal(&portal, 2) + .unwrap() + .collect::>() + .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 3); } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 2c2e180cb..4453f88c8 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,9 +1,9 @@ -use futures::{Future, Stream}; +use futures::Future; use std::io::Read; use tokio_postgres::types::{ToSql, Type}; -use tokio_postgres::{Error, Row}; +use tokio_postgres::Error; -use crate::{Client, CopyOutReader, Portal, Statement, ToStatement}; +use crate::{Client, CopyOutReader, Portal, Query, QueryPortal, Statement, ToStatement}; pub struct Transaction<'a> { client: &'a mut Client, @@ -67,7 +67,7 @@ impl<'a> Transaction<'a> { self.client.execute(query, params) } - pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where T: ?Sized + ToStatement, { @@ -86,12 +86,14 @@ impl<'a> Transaction<'a> { .map(Portal) } - pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { - self.client - .get_mut() - .query_portal(&portal.0, max_rows) - .collect() - .wait() + pub fn query_portal( + &mut self, + portal: &Portal, + max_rows: i32, + ) -> Result, Error> { + Ok(QueryPortal::new( + self.client.get_mut().query_portal(&portal.0, max_rows), + )) } pub fn copy_in( From fb027d79dbe655be04f3e6eed8d7065b34b45c47 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 21:01:10 -0800 Subject: [PATCH 082/819] Move CopyOutReader to its own module --- postgres/src/client.rs | 61 +++------------------------------ postgres/src/copy_out_reader.rs | 61 +++++++++++++++++++++++++++++++++ postgres/src/lib.rs | 2 ++ 3 files changed, 67 insertions(+), 57 deletions(-) create mode 100644 postgres/src/copy_out_reader.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 4ccb9b5a3..f4b9ef3a8 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,8 +1,5 @@ -use bytes::{Buf, Bytes}; -use futures::stream; use futures::{Async, Future, Poll, Stream}; -use std::io::{self, BufRead, Cursor, Read}; -use std::marker::PhantomData; +use std::io::{self, Read}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::Error; #[cfg(feature = "runtime")] @@ -10,7 +7,7 @@ use tokio_postgres::{MakeTlsMode, Socket, TlsMode}; #[cfg(feature = "runtime")] use crate::Builder; -use crate::{Query, Statement, ToStatement, Transaction}; +use crate::{CopyOutReader, Query, Statement, ToStatement, Transaction}; pub struct Client(tokio_postgres::Client); @@ -81,19 +78,8 @@ impl Client { T: ?Sized + ToStatement, { let statement = query.__statement(self)?; - let mut stream = self.0.copy_out(&statement.0, params).wait(); - - let cur = match stream.next() { - Some(Ok(cur)) => cur, - Some(Err(e)) => return Err(e), - None => Bytes::new(), - }; - - Ok(CopyOutReader { - stream, - cur: Cursor::new(cur), - _p: 
PhantomData, - }) + let stream = self.0.copy_out(&statement.0, params); + CopyOutReader::new(stream) } pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { @@ -145,42 +131,3 @@ where } } } - -pub struct CopyOutReader<'a> { - stream: stream::Wait, - cur: Cursor, - _p: PhantomData<&'a mut ()>, -} - -// no-op impl to extend borrow until drop -impl<'a> Drop for CopyOutReader<'a> { - fn drop(&mut self) {} -} - -impl<'a> Read for CopyOutReader<'a> { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let b = self.fill_buf()?; - let len = usize::min(buf.len(), b.len()); - buf[..len].copy_from_slice(&b[..len]); - self.consume(len); - Ok(len) - } -} - -impl<'a> BufRead for CopyOutReader<'a> { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - if self.cur.remaining() == 0 { - match self.stream.next() { - Some(Ok(cur)) => self.cur = Cursor::new(cur), - Some(Err(e)) => return Err(io::Error::new(io::ErrorKind::Other, e)), - None => {} - }; - } - - Ok(Buf::bytes(&self.cur)) - } - - fn consume(&mut self, amt: usize) { - self.cur.advance(amt); - } -} diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs new file mode 100644 index 000000000..b8244f26e --- /dev/null +++ b/postgres/src/copy_out_reader.rs @@ -0,0 +1,61 @@ +use bytes::{Buf, Bytes}; +use futures::stream::{self, Stream}; +use std::io::{self, BufRead, Cursor, Read}; +use std::marker::PhantomData; +use tokio_postgres::Error; + +pub struct CopyOutReader<'a> { + it: stream::Wait, + cur: Cursor, + _p: PhantomData<&'a mut ()>, +} + +// no-op impl to extend borrow until drop +impl<'a> Drop for CopyOutReader<'a> { + fn drop(&mut self) {} +} + +impl<'a> CopyOutReader<'a> { + pub(crate) fn new(stream: tokio_postgres::CopyOut) -> Result, Error> { + let mut it = stream.wait(); + let cur = match it.next() { + Some(Ok(cur)) => cur, + Some(Err(e)) => return Err(e), + None => Bytes::new(), + }; + + Ok(CopyOutReader { + it, + cur: Cursor::new(cur), + _p: PhantomData, + }) + } +} + +impl<'a> Read for CopyOutReader<'a> { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let b = self.fill_buf()?; + let len = usize::min(buf.len(), b.len()); + buf[..len].copy_from_slice(&b[..len]); + self.consume(len); + Ok(len) + } +} + +impl<'a> BufRead for CopyOutReader<'a> { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + if self.cur.remaining() == 0 { + match self.it.next() { + Some(Ok(cur)) => self.cur = Cursor::new(cur), + Some(Err(e)) => return Err(io::Error::new(io::ErrorKind::Other, e)), + None => {} + }; + } + + Ok(Buf::bytes(&self.cur)) + } + + fn consume(&mut self, amt: usize) { + self.cur.advance(amt); + } +} diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 638ef26ce..ece72ca0f 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -6,6 +6,7 @@ use tokio::runtime::{self, Runtime}; #[cfg(feature = "runtime")] mod builder; mod client; +mod copy_out_reader; mod portal; mod query; mod query_portal; @@ -20,6 +21,7 @@ mod test; #[cfg(feature = "runtime")] pub use crate::builder::*; pub use crate::client::*; +pub use crate::copy_out_reader::*; pub use crate::portal::*; pub use crate::query::*; pub use crate::query_portal::*; From 6bcc7c47aeabd50f56c4bdbebe3d8575fc5b9260 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 28 Dec 2018 21:38:50 -0800 Subject: [PATCH 083/819] Fix clippy warning --- postgres/src/copy_out_reader.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index b8244f26e..9cf8174b0 100644 --- 
a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -16,6 +16,7 @@ impl<'a> Drop for CopyOutReader<'a> { } impl<'a> CopyOutReader<'a> { + #[allow(clippy::new_ret_no_self)] pub(crate) fn new(stream: tokio_postgres::CopyOut) -> Result, Error> { let mut it = stream.wait(); let cur = match it.next() { From af41875ea4676265e5873366e16fdde5508640d1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Dec 2018 09:57:49 -0800 Subject: [PATCH 084/819] derive traits for sync builder --- postgres/src/builder.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/postgres/src/builder.rs b/postgres/src/builder.rs index 4eb755268..3a1ccfec3 100644 --- a/postgres/src/builder.rs +++ b/postgres/src/builder.rs @@ -8,6 +8,7 @@ use tokio_postgres::{Error, MakeTlsMode, Socket, TlsMode}; use crate::{Client, RUNTIME}; +#[derive(Debug, Clone, PartialEq)] pub struct Builder(tokio_postgres::Builder); impl Default for Builder { From 6ae93a0634db9cb726d868c6bc78f6840a4eab9e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Dec 2018 13:28:38 -0800 Subject: [PATCH 085/819] Add a convenience connect free function --- tokio-postgres-openssl/src/test.rs | 5 +---- tokio-postgres/src/builder.rs | 2 +- tokio-postgres/src/lib.rs | 8 ++++++++ tokio-postgres/src/proto/connect.rs | 17 +++++++++++------ tokio-postgres/tests/test/runtime.rs | 15 +++++++-------- 5 files changed, 28 insertions(+), 19 deletions(-) diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index a85cc534f..ee37202df 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -77,10 +77,7 @@ fn runtime() { builder.set_ca_file("../test/server.crt").unwrap(); let connector = MakeTlsConnector::new(builder.build()); - let connect = "host=localhost port=5433 user=postgres" - .parse::() - .unwrap() - .connect(RequireTls(connector)); + let connect = tokio_postgres::connect("host=localhost port=5433 user=postgres", RequireTls(connector)); let (mut client, connection) = runtime.block_on(connect).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/builder.rs index c64ad7290..998e4843f 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/builder.rs @@ -128,7 +128,7 @@ impl Builder { where T: MakeTlsMode, { - Connect(ConnectFuture::new(make_tls_mode, self.clone())) + Connect(ConnectFuture::new(make_tls_mode, Ok(self.clone()))) } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 660dc4db7..2b160a3e6 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -36,6 +36,14 @@ fn next_portal() -> String { format!("p{}", ID.fetch_add(1, Ordering::SeqCst)) } +#[cfg(feature = "runtime")] +pub fn connect(config: &str, tls_mode: T) -> Connect +where + T: MakeTlsMode, +{ + Connect(proto::ConnectFuture::new(tls_mode, config.parse())) +} + pub fn cancel_query(stream: S, tls_mode: T, cancel_data: CancelData) -> CancelQuery where S: AsyncRead + AsyncWrite, diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index f99cd87c6..d272db166 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -10,7 +10,10 @@ where T: MakeTlsMode, { #[state_machine_future(start, transitions(MakingTlsMode))] - Start { make_tls_mode: T, config: Builder }, + Start { + make_tls_mode: T, + config: Result, + }, #[state_machine_future(transitions(Connecting))] MakingTlsMode { 
future: T::Future, @@ -38,15 +41,17 @@ where fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let mut state = state.take(); - if state.config.0.host.is_empty() { + let config = state.config?; + + if config.0.host.is_empty() { return Err(Error::missing_host()); } - if state.config.0.port.len() > 1 && state.config.0.port.len() != state.config.0.host.len() { + if config.0.port.len() > 1 && config.0.port.len() != config.0.host.len() { return Err(Error::invalid_port_count()); } - let hostname = match &state.config.0.host[0] { + let hostname = match &config.0.host[0] { Host::Tcp(host) => &**host, // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] @@ -58,7 +63,7 @@ where future, idx: 0, make_tls_mode: state.make_tls_mode, - config: state.config, + config, }) } @@ -113,7 +118,7 @@ impl ConnectFuture where T: MakeTlsMode, { - pub fn new(make_tls_mode: T, config: Builder) -> ConnectFuture { + pub fn new(make_tls_mode: T, config: Result) -> ConnectFuture { Connect::start(make_tls_mode, config) } } diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 576ca02ff..67246876a 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,14 +1,10 @@ use futures::Future; use tokio::runtime::current_thread::Runtime; -use tokio_postgres::{Client, Connection, Error, NoTls, Socket}; - -fn connect(s: &str) -> impl Future), Error = Error> { - s.parse::().unwrap().connect(NoTls) -} +use tokio_postgres::NoTls; fn smoke_test(s: &str) { let mut runtime = Runtime::new().unwrap(); - let connect = connect(s); + let connect = tokio_postgres::connect(s, NoTls); let (mut client, connection) = runtime.block_on(connect).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); @@ -41,9 +37,12 @@ fn multiple_hosts_multiple_ports() { #[test] fn wrong_port_count() { let mut runtime = Runtime::new().unwrap(); - let f = connect("host=localhost port=5433,5433 user=postgres"); + let f = tokio_postgres::connect("host=localhost port=5433,5433 user=postgres", NoTls); runtime.block_on(f).err().unwrap(); - let f = connect("host=localhost,localhost,localhost port=5433,5433 user=postgres"); + let f = tokio_postgres::connect( + "host=localhost,localhost,localhost port=5433,5433 user=postgres", + NoTls, + ); runtime.block_on(f).err().unwrap(); } From a3ff1f9a4c86d96f9a8b7d7bd15b0d388b7abf1a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Dec 2018 14:08:42 -0800 Subject: [PATCH 086/819] Rustfmt --- tokio-postgres-openssl/src/test.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index ee37202df..9ca6ffcac 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -77,7 +77,10 @@ fn runtime() { builder.set_ca_file("../test/server.crt").unwrap(); let connector = MakeTlsConnector::new(builder.build()); - let connect = tokio_postgres::connect("host=localhost port=5433 user=postgres", RequireTls(connector)); + let connect = tokio_postgres::connect( + "host=localhost port=5433 user=postgres", + RequireTls(connector), + ); let (mut client, connection) = runtime.block_on(connect).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); From 9116147aeece7501f273a8ddb56db5d375d32512 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Dec 2018 21:00:58 -0800 Subject: [PATCH 087/819] 
Rename Builder to Config --- postgres/src/client.rs | 8 ++--- postgres/src/{builder.rs => config.rs} | 32 ++++++++++---------- postgres/src/lib.rs | 4 +-- tokio-postgres-native-tls/src/test.rs | 2 +- tokio-postgres-openssl/src/test.rs | 2 +- tokio-postgres/src/{builder.rs => config.rs} | 32 ++++++++++---------- tokio-postgres/src/lib.rs | 4 +-- tokio-postgres/src/proto/connect.rs | 10 +++--- tokio-postgres/src/proto/connect_once.rs | 12 ++++---- tokio-postgres/src/proto/handshake.rs | 10 +++--- tokio-postgres/tests/test/main.rs | 2 +- tokio-postgres/tests/test/parse.rs | 8 ++--- 12 files changed, 63 insertions(+), 63 deletions(-) rename postgres/src/{builder.rs => config.rs} (66%) rename tokio-postgres/src/{builder.rs => config.rs} (92%) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index f4b9ef3a8..93d54da4b 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -6,7 +6,7 @@ use tokio_postgres::Error; use tokio_postgres::{MakeTlsMode, Socket, TlsMode}; #[cfg(feature = "runtime")] -use crate::Builder; +use crate::Config; use crate::{CopyOutReader, Query, Statement, ToStatement, Transaction}; pub struct Client(tokio_postgres::Client); @@ -21,12 +21,12 @@ impl Client { T::Future: Send, >::Future: Send, { - params.parse::()?.connect(tls_mode) + params.parse::()?.connect(tls_mode) } #[cfg(feature = "runtime")] - pub fn builder() -> Builder { - Builder::new() + pub fn builder() -> Config { + Config::new() } pub fn prepare(&mut self, query: &str) -> Result { diff --git a/postgres/src/builder.rs b/postgres/src/config.rs similarity index 66% rename from postgres/src/builder.rs rename to postgres/src/config.rs index 3a1ccfec3..5969a1d8e 100644 --- a/postgres/src/builder.rs +++ b/postgres/src/config.rs @@ -9,26 +9,26 @@ use tokio_postgres::{Error, MakeTlsMode, Socket, TlsMode}; use crate::{Client, RUNTIME}; #[derive(Debug, Clone, PartialEq)] -pub struct Builder(tokio_postgres::Builder); +pub struct Config(tokio_postgres::Config); -impl Default for Builder { - fn default() -> Builder { - Builder(tokio_postgres::Builder::default()) +impl Default for Config { + fn default() -> Config { + Config(tokio_postgres::Config::default()) } } -impl Builder { - pub fn new() -> Builder { - Builder(tokio_postgres::Builder::new()) +impl Config { + pub fn new() -> Config { + Config(tokio_postgres::Config::new()) } - pub fn host(&mut self, host: &str) -> &mut Builder { + pub fn host(&mut self, host: &str) -> &mut Config { self.0.host(host); self } #[cfg(unix)] - pub fn host_path(&mut self, host: T) -> &mut Builder + pub fn host_path(&mut self, host: T) -> &mut Config where T: AsRef, { @@ -36,22 +36,22 @@ impl Builder { self } - pub fn port(&mut self, port: u16) -> &mut Builder { + pub fn port(&mut self, port: u16) -> &mut Config { self.0.port(port); self } - pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { + pub fn param(&mut self, key: &str, value: &str) -> &mut Config { self.0.param(key, value); self } - pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Builder { + pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { self.0.connect_timeout(connect_timeout); self } - pub fn password(&mut self, password: T) -> &mut Builder + pub fn password(&mut self, password: T) -> &mut Config where T: AsRef<[u8]>, { @@ -76,10 +76,10 @@ impl Builder { } } -impl FromStr for Builder { +impl FromStr for Config { type Err = Error; - fn from_str(s: &str) -> Result { - s.parse().map(Builder) + fn from_str(s: &str) -> Result { + s.parse().map(Config) } 
} diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index ece72ca0f..582376966 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -4,7 +4,7 @@ use lazy_static::lazy_static; use tokio::runtime::{self, Runtime}; #[cfg(feature = "runtime")] -mod builder; +mod config; mod client; mod copy_out_reader; mod portal; @@ -19,7 +19,7 @@ mod transaction; mod test; #[cfg(feature = "runtime")] -pub use crate::builder::*; +pub use crate::config::*; pub use crate::client::*; pub use crate::copy_out_reader::*; pub use crate::portal::*; diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index 78e7852e2..dda907f4b 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -13,7 +13,7 @@ where { let mut runtime = Runtime::new().unwrap(); - let builder = s.parse::().unwrap(); + let builder = s.parse::().unwrap(); let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index 9ca6ffcac..9a1238496 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -13,7 +13,7 @@ where { let mut runtime = Runtime::new().unwrap(); - let builder = s.parse::().unwrap(); + let builder = s.parse::().unwrap(); let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) diff --git a/tokio-postgres/src/builder.rs b/tokio-postgres/src/config.rs similarity index 92% rename from tokio-postgres/src/builder.rs rename to tokio-postgres/src/config.rs index 998e4843f..f5510443b 100644 --- a/tokio-postgres/src/builder.rs +++ b/tokio-postgres/src/config.rs @@ -36,21 +36,21 @@ pub(crate) struct Inner { } #[derive(Debug, Clone, PartialEq)] -pub struct Builder(pub(crate) Arc); +pub struct Config(pub(crate) Arc); -impl Default for Builder { - fn default() -> Builder { - Builder::new() +impl Default for Config { + fn default() -> Config { + Config::new() } } -impl Builder { - pub fn new() -> Builder { +impl Config { + pub fn new() -> Config { let mut params = HashMap::new(); params.insert("client_encoding".to_string(), "UTF8".to_string()); params.insert("timezone".to_string(), "GMT".to_string()); - Builder(Arc::new(Inner { + Config(Arc::new(Inner { params, password: None, #[cfg(feature = "runtime")] @@ -63,7 +63,7 @@ impl Builder { } #[cfg(feature = "runtime")] - pub fn host(&mut self, host: &str) -> &mut Builder { + pub fn host(&mut self, host: &str) -> &mut Config { #[cfg(unix)] { if host.starts_with('/') { @@ -78,7 +78,7 @@ impl Builder { } #[cfg(all(feature = "runtime", unix))] - pub fn host_path(&mut self, host: T) -> &mut Builder + pub fn host_path(&mut self, host: T) -> &mut Config where T: AsRef, { @@ -89,18 +89,18 @@ impl Builder { } #[cfg(feature = "runtime")] - pub fn port(&mut self, port: u16) -> &mut Builder { + pub fn port(&mut self, port: u16) -> &mut Config { Arc::make_mut(&mut self.0).port.push(port); self } #[cfg(feature = "runtime")] - pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Builder { + pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { Arc::make_mut(&mut self.0).connect_timeout = Some(connect_timeout); self } - pub fn password(&mut self, password: T) -> &mut Builder + pub fn password(&mut self, password: T) -> &mut Config where T: AsRef<[u8]>, { @@ -108,7 +108,7 @@ impl Builder { self } - pub fn param(&mut self, key: &str, value: &str) -> &mut Builder { + pub fn param(&mut 
self, key: &str, value: &str) -> &mut Config { Arc::make_mut(&mut self.0) .params .insert(key.to_string(), value.to_string()); @@ -132,12 +132,12 @@ impl Builder { } } -impl FromStr for Builder { +impl FromStr for Config { type Err = Error; - fn from_str(s: &str) -> Result { + fn from_str(s: &str) -> Result { let mut parser = Parser::new(s); - let mut builder = Builder::new(); + let mut builder = Config::new(); while let Some((key, value)) = parser.parameter()? { match key { diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 2b160a3e6..0e491480d 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -6,7 +6,7 @@ use std::error::Error as StdError; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio_io::{AsyncRead, AsyncWrite}; -pub use crate::builder::*; +pub use crate::config::*; pub use crate::error::*; use crate::proto::CancelFuture; pub use crate::row::*; @@ -16,7 +16,7 @@ pub use crate::stmt::Column; pub use crate::tls::*; use crate::types::{ToSql, Type}; -mod builder; +mod config; pub mod error; mod proto; mod row; diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index d272db166..d37f32e5b 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -2,7 +2,7 @@ use futures::{try_ready, Async, Future, Poll}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use crate::proto::{Client, ConnectOnceFuture, Connection}; -use crate::{Builder, Error, Host, MakeTlsMode, Socket}; +use crate::{Config, Error, Host, MakeTlsMode, Socket}; #[derive(StateMachineFuture)] pub enum Connect @@ -12,21 +12,21 @@ where #[state_machine_future(start, transitions(MakingTlsMode))] Start { make_tls_mode: T, - config: Result, + config: Result, }, #[state_machine_future(transitions(Connecting))] MakingTlsMode { future: T::Future, idx: usize, make_tls_mode: T, - config: Builder, + config: Config, }, #[state_machine_future(transitions(MakingTlsMode, Finished))] Connecting { future: ConnectOnceFuture, idx: usize, make_tls_mode: T, - config: Builder, + config: Config, }, #[state_machine_future(ready)] Finished((Client, Connection)), @@ -118,7 +118,7 @@ impl ConnectFuture where T: MakeTlsMode, { - pub fn new(make_tls_mode: T, config: Result) -> ConnectFuture { + pub fn new(make_tls_mode: T, config: Result) -> ConnectFuture { Connect::start(make_tls_mode, config) } } diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 70471bc68..2c21f8d8b 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -16,7 +16,7 @@ use tokio_timer::Delay; use tokio_uds::UnixStream; use crate::proto::{Client, Connection, HandshakeFuture}; -use crate::{Builder, Error, Host, Socket, TlsMode}; +use crate::{Config, Error, Host, Socket, TlsMode}; lazy_static! 
{ static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() @@ -36,7 +36,7 @@ where Start { idx: usize, tls_mode: T, - config: Builder, + config: Config, }, #[cfg(unix)] #[state_machine_future(transitions(Handshaking))] @@ -44,14 +44,14 @@ where future: tokio_uds::ConnectFuture, timeout: Option, tls_mode: T, - config: Builder, + config: Config, }, #[state_machine_future(transitions(ConnectingTcp))] ResolvingDns { future: CpuFuture, io::Error>, timeout: Option, tls_mode: T, - config: Builder, + config: Config, }, #[state_machine_future(transitions(Handshaking))] ConnectingTcp { @@ -59,7 +59,7 @@ where addrs: vec::IntoIter, timeout: Option, tls_mode: T, - config: Builder, + config: Config, }, #[state_machine_future(transitions(Finished))] Handshaking { future: HandshakeFuture }, @@ -214,7 +214,7 @@ impl ConnectOnceFuture where T: TlsMode, { - pub fn new(idx: usize, tls_mode: T, config: Builder) -> ConnectOnceFuture { + pub fn new(idx: usize, tls_mode: T, config: Config) -> ConnectOnceFuture { ConnectOnce::start(idx, tls_mode, config) } } diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs index 1da91ff01..367905282 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -13,7 +13,7 @@ use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::{Client, Connection, PostgresCodec, TlsFuture}; -use crate::{Builder, CancelData, ChannelBinding, Error, TlsMode}; +use crate::{CancelData, ChannelBinding, Config, Error, TlsMode}; #[derive(StateMachineFuture)] pub enum Handshake @@ -24,18 +24,18 @@ where #[state_machine_future(start, transitions(SendingStartup))] Start { future: TlsFuture, - config: Builder, + config: Config, }, #[state_machine_future(transitions(ReadingAuth))] SendingStartup { future: sink::Send>, - config: Builder, + config: Config, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingInfo, SendingPassword, SendingSasl))] ReadingAuth { stream: Framed, - config: Builder, + config: Config, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingAuthCompletion))] @@ -334,7 +334,7 @@ where S: AsyncRead + AsyncWrite, T: TlsMode, { - pub fn new(stream: S, tls_mode: T, config: Builder) -> HandshakeFuture { + pub fn new(stream: S, tls_mode: T, config: Config) -> HandshakeFuture { Handshake::start(TlsFuture::new(stream, tls_mode), config) } } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 1f72a7eae..59f01238f 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -22,7 +22,7 @@ mod types; fn connect( s: &str, ) -> impl Future), Error = tokio_postgres::Error> { - let builder = s.parse::().unwrap(); + let builder = s.parse::().unwrap(); TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) .and_then(move |s| builder.handshake(s, NoTls)) diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index c5d2e0fa3..ac320d20b 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,10 +1,10 @@ #[test] fn pairs_ok() { let params = r"user=foo password=' fizz \'buzz\\ ' thing = ''" - .parse::() + .parse::() .unwrap(); - let mut expected = tokio_postgres::Builder::new(); + let mut expected = tokio_postgres::Config::new(); expected .param("user", "foo") .password(r" fizz 'buzz\ ") @@ -16,10 +16,10 @@ fn pairs_ok() { #[test] fn pairs_ws() { let params = " user\t=\r\n\x0bfoo \t 
password = hunter2 " - .parse::() + .parse::() .unwrap();; - let mut expected = tokio_postgres::Builder::new(); + let mut expected = tokio_postgres::Config::new(); expected.param("user", "foo").password("hunter2"); assert_eq!(params, expected); From e4bb2aedfbbdc313e164e8c051088addbd83ae42 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Dec 2018 21:05:01 -0800 Subject: [PATCH 088/819] Rename Client::builder to Client::configure --- postgres/src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 93d54da4b..880523db4 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -25,7 +25,7 @@ impl Client { } #[cfg(feature = "runtime")] - pub fn builder() -> Config { + pub fn configure() -> Config { Config::new() } From 983de2ef9d5a82e4ff9f18ec1c032190adb39400 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Dec 2018 21:06:24 -0800 Subject: [PATCH 089/819] Rustfmt --- postgres/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 582376966..d3c68c89e 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -3,9 +3,9 @@ use lazy_static::lazy_static; #[cfg(feature = "runtime")] use tokio::runtime::{self, Runtime}; +mod client; #[cfg(feature = "runtime")] mod config; -mod client; mod copy_out_reader; mod portal; mod query; @@ -18,9 +18,9 @@ mod transaction; #[cfg(test)] mod test; +pub use crate::client::*; #[cfg(feature = "runtime")] pub use crate::config::*; -pub use crate::client::*; pub use crate::copy_out_reader::*; pub use crate::portal::*; pub use crate::query::*; From 38db34eb6a366fb7e695d93a886de5c07b22d319 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 30 Dec 2018 09:38:12 -0800 Subject: [PATCH 090/819] Support TCP keepalive --- tokio-postgres/src/config.rs | 34 ++++++++++++++++++++++++ tokio-postgres/src/error/mod.rs | 18 +++++++++++++ tokio-postgres/src/proto/connect_once.rs | 6 +++++ tokio-postgres/tests/test/parse.rs | 21 ++++++++++++++- 4 files changed, 78 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index f5510443b..a3ec78bf4 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -33,6 +33,10 @@ pub(crate) struct Inner { pub(crate) port: Vec, #[cfg(feature = "runtime")] pub(crate) connect_timeout: Option, + #[cfg(feature = "runtime")] + pub(crate) keepalives: bool, + #[cfg(feature = "runtime")] + pub(crate) keepalives_idle: Duration, } #[derive(Debug, Clone, PartialEq)] @@ -59,6 +63,10 @@ impl Config { port: vec![], #[cfg(feature = "runtime")] connect_timeout: None, + #[cfg(feature = "runtime")] + keepalives: true, + #[cfg(feature = "runtime")] + keepalives_idle: Duration::from_secs(2 * 60 * 60), })) } @@ -100,6 +108,18 @@ impl Config { self } + #[cfg(feature = "runtime")] + pub fn keepalives(&mut self, keepalives: bool) -> &mut Config { + Arc::make_mut(&mut self.0).keepalives = keepalives; + self + } + + #[cfg(feature = "runtime")] + pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { + Arc::make_mut(&mut self.0).keepalives_idle = keepalives_idle; + self + } + pub fn password(&mut self, password: T) -> &mut Config where T: AsRef<[u8]>, @@ -170,6 +190,20 @@ impl FromStr for Config { builder.connect_timeout(Duration::from_secs(timeout as u64)); } } + #[cfg(feature = "runtime")] + "keepalives" => { + let keepalives = value.parse::().map_err(Error::invalid_keepalives)?; + 
builder.keepalives(keepalives != 0); + } + #[cfg(feature = "runtime")] + "keepalives_idle" => { + let keepalives_idle = value + .parse::() + .map_err(Error::invalid_keepalives_idle)?; + if keepalives_idle > 0 { + builder.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); + } + } key => { builder.param(key, &value); } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 1b7fca2cd..58f0e56ba 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -359,6 +359,10 @@ enum Kind { #[cfg(feature = "runtime")] InvalidConnectTimeout, #[cfg(feature = "runtime")] + InvalidKeepalives, + #[cfg(feature = "runtime")] + InvalidKeepalivesIdle, + #[cfg(feature = "runtime")] Timer, #[cfg(feature = "runtime")] ConnectTimeout, @@ -410,6 +414,10 @@ impl fmt::Display for Error { #[cfg(feature = "runtime")] Kind::InvalidConnectTimeout => "invalid connect_timeout", #[cfg(feature = "runtime")] + Kind::InvalidKeepalives => "invalid keepalives", + #[cfg(feature = "runtime")] + Kind::InvalidKeepalivesIdle => "invalid keepalives_value", + #[cfg(feature = "runtime")] Kind::Timer => "timer error", #[cfg(feature = "runtime")] Kind::ConnectTimeout => "timed out connecting to server", @@ -541,6 +549,16 @@ impl Error { Error::new(Kind::InvalidConnectTimeout, Some(Box::new(e))) } + #[cfg(feature = "runtime")] + pub(crate) fn invalid_keepalives(e: ParseIntError) -> Error { + Error::new(Kind::InvalidKeepalives, Some(Box::new(e))) + } + + #[cfg(feature = "runtime")] + pub(crate) fn invalid_keepalives_idle(e: ParseIntError) -> Error { + Error::new(Kind::InvalidKeepalivesIdle, Some(Box::new(e))) + } + #[cfg(feature = "runtime")] pub(crate) fn timer(e: tokio_timer::Error) -> Error { Error::new(Kind::Timer, Some(Box::new(e))) diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 2c21f8d8b..cfffe3b5d 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -194,6 +194,12 @@ where let state = state.take(); stream.set_nodelay(true).map_err(Error::connect)?; + if state.config.0.keepalives { + stream + .set_keepalive(Some(state.config.0.keepalives_idle)) + .map_err(Error::connect)?; + } + let stream = Socket::new_tcp(stream); transition!(Handshaking { diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index ac320d20b..74fedb38d 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,3 +1,6 @@ +#[cfg(feature = "runtime")] +use std::time::Duration; + #[test] fn pairs_ok() { let params = r"user=foo password=' fizz \'buzz\\ ' thing = ''" @@ -17,10 +20,26 @@ fn pairs_ok() { fn pairs_ws() { let params = " user\t=\r\n\x0bfoo \t password = hunter2 " .parse::() - .unwrap();; + .unwrap(); let mut expected = tokio_postgres::Config::new(); expected.param("user", "foo").password("hunter2"); assert_eq!(params, expected); } + +#[test] +#[cfg(feature = "runtime")] +fn settings() { + let params = "connect_timeout=3 keepalives=0 keepalives_idle=30" + .parse::() + .unwrap(); + + let mut expected = tokio_postgres::Config::new(); + expected + .connect_timeout(Duration::from_secs(3)) + .keepalives(false) + .keepalives_idle(Duration::from_secs(30)); + + assert_eq!(params, expected); +} From 45444d6c5129a656da69c0cf7b681b00eab50f4e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 30 Dec 2018 11:50:15 -0800 Subject: [PATCH 091/819] Support target_session_attrs Closes #399 --- tokio-postgres/src/config.rs | 31 
++++++++++++++ tokio-postgres/src/error/mod.rs | 18 ++++++++ tokio-postgres/src/proto/connect_once.rs | 53 ++++++++++++++++++++---- tokio-postgres/tests/test/parse.rs | 12 ++++-- tokio-postgres/tests/test/runtime.rs | 20 +++++++++ 5 files changed, 123 insertions(+), 11 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index a3ec78bf4..458cdf80e 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -15,6 +15,15 @@ use crate::proto::HandshakeFuture; use crate::{Connect, MakeTlsMode, Socket}; use crate::{Error, Handshake, TlsMode}; +#[cfg(feature = "runtime")] +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum TargetSessionAttrs { + Any, + ReadWrite, + #[doc(hidden)] + __NonExhaustive, +} + #[cfg(feature = "runtime")] #[derive(Debug, Clone, PartialEq)] pub(crate) enum Host { @@ -37,6 +46,8 @@ pub(crate) struct Inner { pub(crate) keepalives: bool, #[cfg(feature = "runtime")] pub(crate) keepalives_idle: Duration, + #[cfg(feature = "runtime")] + pub(crate) target_session_attrs: TargetSessionAttrs, } #[derive(Debug, Clone, PartialEq)] @@ -67,6 +78,8 @@ impl Config { keepalives: true, #[cfg(feature = "runtime")] keepalives_idle: Duration::from_secs(2 * 60 * 60), + #[cfg(feature = "runtime")] + target_session_attrs: TargetSessionAttrs::Any, })) } @@ -120,6 +133,15 @@ impl Config { self } + #[cfg(feature = "runtime")] + pub fn target_session_attrs( + &mut self, + target_session_attrs: TargetSessionAttrs, + ) -> &mut Config { + Arc::make_mut(&mut self.0).target_session_attrs = target_session_attrs; + self + } + pub fn password(&mut self, password: T) -> &mut Config where T: AsRef<[u8]>, @@ -204,6 +226,15 @@ impl FromStr for Config { builder.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); } } + #[cfg(feature = "runtime")] + "target_session_attrs" => { + let target_session_attrs = match &*value { + "any" => TargetSessionAttrs::Any, + "read-write" => TargetSessionAttrs::ReadWrite, + _ => return Err(Error::invalid_target_session_attrs()), + }; + builder.target_session_attrs(target_session_attrs); + } key => { builder.param(key, &value); } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 58f0e56ba..1823387c1 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -361,11 +361,15 @@ enum Kind { #[cfg(feature = "runtime")] InvalidKeepalives, #[cfg(feature = "runtime")] + InvalidTargetSessionAttrs, + #[cfg(feature = "runtime")] InvalidKeepalivesIdle, #[cfg(feature = "runtime")] Timer, #[cfg(feature = "runtime")] ConnectTimeout, + #[cfg(feature = "runtime")] + ReadOnlyDatabase, } struct ErrorInner { @@ -418,9 +422,13 @@ impl fmt::Display for Error { #[cfg(feature = "runtime")] Kind::InvalidKeepalivesIdle => "invalid keepalives_value", #[cfg(feature = "runtime")] + Kind::InvalidTargetSessionAttrs => "invalid target_session_attrs", + #[cfg(feature = "runtime")] Kind::Timer => "timer error", #[cfg(feature = "runtime")] Kind::ConnectTimeout => "timed out connecting to server", + #[cfg(feature = "runtime")] + Kind::ReadOnlyDatabase => "the database was read-only", }; fmt.write_str(s)?; if let Some(ref cause) = self.0.cause { @@ -559,6 +567,11 @@ impl Error { Error::new(Kind::InvalidKeepalivesIdle, Some(Box::new(e))) } + #[cfg(feature = "runtime")] + pub(crate) fn invalid_target_session_attrs() -> Error { + Error::new(Kind::InvalidTargetSessionAttrs, None) + } + #[cfg(feature = "runtime")] pub(crate) fn timer(e: tokio_timer::Error) -> Error { Error::new(Kind::Timer, 
Some(Box::new(e))) @@ -568,4 +581,9 @@ impl Error { pub(crate) fn connect_timeout() -> Error { Error::new(Kind::ConnectTimeout, None) } + + #[cfg(feature = "runtime")] + pub(crate) fn read_only_database() -> Error { + Error::new(Kind::ReadOnlyDatabase, None) + } } diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index cfffe3b5d..bf92aa6a2 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -1,6 +1,6 @@ #![allow(clippy::large_enum_variant)] -use futures::{try_ready, Async, Future, Poll}; +use futures::{try_ready, Async, Future, Poll, Stream}; use futures_cpupool::{CpuFuture, CpuPool}; use lazy_static::lazy_static; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; @@ -15,8 +15,8 @@ use tokio_timer::Delay; #[cfg(unix)] use tokio_uds::UnixStream; -use crate::proto::{Client, Connection, HandshakeFuture}; -use crate::{Config, Error, Host, Socket, TlsMode}; +use crate::proto::{Client, Connection, HandshakeFuture, SimpleQueryStream}; +use crate::{Config, Error, Host, Socket, TargetSessionAttrs, TlsMode}; lazy_static! { static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() @@ -61,8 +61,17 @@ where tls_mode: T, config: Config, }, + #[state_machine_future(transitions(CheckingSessionAttrs, Finished))] + Handshaking { + future: HandshakeFuture, + target_session_attrs: TargetSessionAttrs, + }, #[state_machine_future(transitions(Finished))] - Handshaking { future: HandshakeFuture }, + CheckingSessionAttrs { + stream: SimpleQueryStream, + client: Client, + connection: Connection, + }, #[state_machine_future(ready)] Finished((Client, Connection)), #[state_machine_future(error)] @@ -130,7 +139,8 @@ where let state = state.take(); transition!(Handshaking { - future: HandshakeFuture::new(stream, state.tls_mode, state.config) + target_session_attrs: state.config.0.target_session_attrs, + future: HandshakeFuture::new(stream, state.tls_mode, state.config), }) } @@ -203,6 +213,7 @@ where let stream = Socket::new_tcp(stream); transition!(Handshaking { + target_session_attrs: state.config.0.target_session_attrs, future: HandshakeFuture::new(stream, state.tls_mode, state.config), }) } @@ -210,9 +221,37 @@ where fn poll_handshaking<'a>( state: &'a mut RentToOwn<'a, Handshaking>, ) -> Poll, Error> { - let r = try_ready!(state.future.poll()); + let (client, connection) = try_ready!(state.future.poll()); + + if let TargetSessionAttrs::ReadWrite = state.target_session_attrs { + transition!(CheckingSessionAttrs { + stream: client.batch_execute("SHOW transaction_read_only"), + client, + connection, + }) + } else { + transition!(Finished((client, connection))) + } + } - transition!(Finished(r)) + fn poll_checking_session_attrs<'a>( + state: &'a mut RentToOwn<'a, CheckingSessionAttrs>, + ) -> Poll, Error> { + if let Async::Ready(()) = state.connection.poll()? 
{ + return Err(Error::closed()); + } + + match try_ready!(state.stream.poll()) { + Some(row) => { + if row.get(0) == Some("on") { + Err(Error::read_only_database()) + } else { + let state = state.take(); + transition!(Finished((state.client, state.connection))) + } + } + None => Err(Error::closed()), + } } } diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index 74fedb38d..023454566 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,5 +1,7 @@ #[cfg(feature = "runtime")] use std::time::Duration; +#[cfg(feature = "runtime")] +use tokio_postgres::TargetSessionAttrs; #[test] fn pairs_ok() { @@ -31,15 +33,17 @@ fn pairs_ws() { #[test] #[cfg(feature = "runtime")] fn settings() { - let params = "connect_timeout=3 keepalives=0 keepalives_idle=30" - .parse::() - .unwrap(); + let params = + "connect_timeout=3 keepalives=0 keepalives_idle=30 target_session_attrs=read-write" + .parse::() + .unwrap(); let mut expected = tokio_postgres::Config::new(); expected .connect_timeout(Duration::from_secs(3)) .keepalives(false) - .keepalives_idle(Duration::from_secs(30)); + .keepalives_idle(Duration::from_secs(30)) + .target_session_attrs(TargetSessionAttrs::ReadWrite); assert_eq!(params, expected); } diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 67246876a..29df4d8c7 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -46,3 +46,23 @@ fn wrong_port_count() { ); runtime.block_on(f).err().unwrap(); } + +#[test] +fn target_session_attrs_ok() { + let mut runtime = Runtime::new().unwrap(); + let f = tokio_postgres::connect( + "host=localhost port=5433 user=postgres target_session_attrs=read-write", + NoTls, + ); + runtime.block_on(f).unwrap(); +} + +#[test] +fn target_session_attrs_err() { + let mut runtime = Runtime::new().unwrap(); + let f = tokio_postgres::connect( + "host=localhost port=5433 user=postgres target_session_attrs=read-write default_transaction_read_only=on", + NoTls, + ); + runtime.block_on(f).err().unwrap(); +} From a675b0b50a5f1618b417ac3ce783cd66a53a5f51 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 30 Dec 2018 20:07:03 -0800 Subject: [PATCH 092/819] Accept a limited set of parameters in Config This matches with libpq's behavior, which doesn't allow arbitrary parameters. You can still set arbitrary parameters, just through the `options` field. 
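A minimal usage sketch (illustration only, not part of the patch): once only the fixed set of keys is accepted, a server setting such as `default_transaction_read_only` can still be supplied through `options`, using the same `-c name=value` syntax libpq forwards to the server. The setter names below are the ones this patch introduces on `Config`; the concrete values are just examples.

    use tokio_postgres::Config;

    fn example_config() -> Config {
        let mut config = Config::new();
        config
            .user("postgres")
            .dbname("mydb")
            // keys without a dedicated setter are passed via `options`
            // and forwarded to the server as run-time parameters
            .options("-c default_transaction_read_only=on");
        config
    }
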
--- postgres/src/config.rs | 53 +++++++-- tokio-postgres/src/config.rs | 131 ++++++++++++++++------- tokio-postgres/src/error/mod.rs | 128 +++------------------- tokio-postgres/src/proto/connect.rs | 4 +- tokio-postgres/src/proto/connect_once.rs | 23 ++-- tokio-postgres/src/proto/handshake.rs | 49 +++++---- tokio-postgres/tests/test/parse.rs | 8 +- tokio-postgres/tests/test/runtime.rs | 3 +- 8 files changed, 204 insertions(+), 195 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 5969a1d8e..b6b3047d1 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -4,7 +4,7 @@ use log::error; use std::path::Path; use std::str::FromStr; use std::time::Duration; -use tokio_postgres::{Error, MakeTlsMode, Socket, TlsMode}; +use tokio_postgres::{Error, MakeTlsMode, Socket, TargetSessionAttrs, TlsMode}; use crate::{Client, RUNTIME}; @@ -22,6 +22,34 @@ impl Config { Config(tokio_postgres::Config::new()) } + pub fn user(&mut self, user: &str) -> &mut Config { + self.0.user(user); + self + } + + pub fn password(&mut self, password: T) -> &mut Config + where + T: AsRef<[u8]>, + { + self.0.password(password); + self + } + + pub fn dbname(&mut self, dbname: &str) -> &mut Config { + self.0.dbname(dbname); + self + } + + pub fn options(&mut self, options: &str) -> &mut Config { + self.0.options(options); + self + } + + pub fn application_name(&mut self, application_name: &str) -> &mut Config { + self.0.application_name(application_name); + self + } + pub fn host(&mut self, host: &str) -> &mut Config { self.0.host(host); self @@ -41,21 +69,26 @@ impl Config { self } - pub fn param(&mut self, key: &str, value: &str) -> &mut Config { - self.0.param(key, value); + pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { + self.0.connect_timeout(connect_timeout); self } - pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { - self.0.connect_timeout(connect_timeout); + pub fn keepalives(&mut self, keepalives: bool) -> &mut Config { + self.0.keepalives(keepalives); self } - pub fn password(&mut self, password: T) -> &mut Config - where - T: AsRef<[u8]>, - { - self.0.password(password); + pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { + self.0.keepalives_idle(keepalives_idle); + self + } + + pub fn target_session_attrs( + &mut self, + target_session_attrs: TargetSessionAttrs, + ) -> &mut Config { + self.0.target_session_attrs(target_session_attrs); self } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 458cdf80e..288601034 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1,4 +1,5 @@ -use std::collections::HashMap; +use std::error; +use std::fmt; use std::iter; #[cfg(all(feature = "runtime", unix))] use std::path::{Path, PathBuf}; @@ -34,8 +35,11 @@ pub(crate) enum Host { #[derive(Debug, Clone, PartialEq)] pub(crate) struct Inner { - pub(crate) params: HashMap, + pub(crate) user: Option, pub(crate) password: Option>, + pub(crate) dbname: Option, + pub(crate) options: Option, + pub(crate) application_name: Option, #[cfg(feature = "runtime")] pub(crate) host: Vec, #[cfg(feature = "runtime")] @@ -61,13 +65,12 @@ impl Default for Config { impl Config { pub fn new() -> Config { - let mut params = HashMap::new(); - params.insert("client_encoding".to_string(), "UTF8".to_string()); - params.insert("timezone".to_string(), "GMT".to_string()); - Config(Arc::new(Inner { - params, + user: None, password: None, + dbname: None, + options: None, + 
application_name: None, #[cfg(feature = "runtime")] host: vec![], #[cfg(feature = "runtime")] @@ -83,6 +86,34 @@ impl Config { })) } + pub fn user(&mut self, user: &str) -> &mut Config { + Arc::make_mut(&mut self.0).user = Some(user.to_string()); + self + } + + pub fn password(&mut self, password: T) -> &mut Config + where + T: AsRef<[u8]>, + { + Arc::make_mut(&mut self.0).password = Some(password.as_ref().to_vec()); + self + } + + pub fn dbname(&mut self, dbname: &str) -> &mut Config { + Arc::make_mut(&mut self.0).dbname = Some(dbname.to_string()); + self + } + + pub fn options(&mut self, options: &str) -> &mut Config { + Arc::make_mut(&mut self.0).options = Some(options.to_string()); + self + } + + pub fn application_name(&mut self, application_name: &str) -> &mut Config { + Arc::make_mut(&mut self.0).application_name = Some(application_name.to_string()); + self + } + #[cfg(feature = "runtime")] pub fn host(&mut self, host: &str) -> &mut Config { #[cfg(unix)] @@ -142,21 +173,6 @@ impl Config { self } - pub fn password(&mut self, password: T) -> &mut Config - where - T: AsRef<[u8]>, - { - Arc::make_mut(&mut self.0).password = Some(password.as_ref().to_vec()); - self - } - - pub fn param(&mut self, key: &str, value: &str) -> &mut Config { - Arc::make_mut(&mut self.0) - .params - .insert(key.to_string(), value.to_string()); - self - } - pub fn handshake(&self, stream: S, tls_mode: T) -> Handshake where S: AsyncRead + AsyncWrite, @@ -183,9 +199,21 @@ impl FromStr for Config { while let Some((key, value)) = parser.parameter()? { match key { + "user" => { + builder.user(&value); + } "password" => { builder.password(value); } + "dbname" => { + builder.dbname(&value); + } + "options" => { + builder.options(&value); + } + "application_name" => { + builder.application_name(&value); + } #[cfg(feature = "runtime")] "host" => { for host in value.split(',') { @@ -198,30 +226,33 @@ impl FromStr for Config { let port = if port.is_empty() { 5432 } else { - port.parse().map_err(Error::invalid_port)? + port.parse() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("port"))))? 
}; builder.port(port); } } #[cfg(feature = "runtime")] "connect_timeout" => { - let timeout = value - .parse::() - .map_err(Error::invalid_connect_timeout)?; + let timeout = value.parse::().map_err(|_| { + Error::config_parse(Box::new(InvalidValue("connect_timeout"))) + })?; if timeout > 0 { builder.connect_timeout(Duration::from_secs(timeout as u64)); } } #[cfg(feature = "runtime")] "keepalives" => { - let keepalives = value.parse::().map_err(Error::invalid_keepalives)?; + let keepalives = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives"))))?; builder.keepalives(keepalives != 0); } #[cfg(feature = "runtime")] "keepalives_idle" => { - let keepalives_idle = value - .parse::() - .map_err(Error::invalid_keepalives_idle)?; + let keepalives_idle = value.parse::().map_err(|_| { + Error::config_parse(Box::new(InvalidValue("keepalives_idle"))) + })?; if keepalives_idle > 0 { builder.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); } @@ -231,12 +262,18 @@ impl FromStr for Config { let target_session_attrs = match &*value { "any" => TargetSessionAttrs::Any, "read-write" => TargetSessionAttrs::ReadWrite, - _ => return Err(Error::invalid_target_session_attrs()), + _ => { + return Err(Error::config_parse(Box::new(InvalidValue( + "target_session_attrs", + )))) + } }; builder.target_session_attrs(target_session_attrs); } key => { - builder.param(key, &value); + return Err(Error::config_parse(Box::new(UnknownOption( + key.to_string(), + )))) } } } @@ -245,6 +282,28 @@ impl FromStr for Config { } } +#[derive(Debug)] +struct UnknownOption(String); + +impl fmt::Display for UnknownOption { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "unknown option `{}`", self.0) + } +} + +impl error::Error for UnknownOption {} + +#[derive(Debug)] +struct InvalidValue(&'static str); + +impl fmt::Display for InvalidValue { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "invalid value for option `{}`", self.0) + } +} + +impl error::Error for InvalidValue {} + struct Parser<'a> { s: &'a str, it: iter::Peekable>, @@ -290,9 +349,9 @@ impl<'a> Parser<'a> { "unexpected character at byte {}: expected `{}` but got `{}`", i, target, c ); - Err(Error::connection_syntax(m.into())) + Err(Error::config_parse(m.into())) } - None => Err(Error::connection_syntax("unexpected EOF".into())), + None => Err(Error::config_parse("unexpected EOF".into())), } } @@ -351,7 +410,7 @@ impl<'a> Parser<'a> { } if value.is_empty() { - return Err(Error::connection_syntax("unexpected EOF".into())); + return Err(Error::config_parse("unexpected EOF".into())); } Ok(value) @@ -375,7 +434,7 @@ impl<'a> Parser<'a> { } } - Err(Error::connection_syntax( + Err(Error::config_parse( "unterminated quoted connection parameter value".into(), )) } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 1823387c1..144340da6 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -5,8 +5,6 @@ use postgres_protocol::message::backend::{ErrorFields, ErrorResponseBody}; use std::error::{self, Error as _Error}; use std::fmt; use std::io; -#[cfg(feature = "runtime")] -use std::num::ParseIntError; pub use self::sqlstate::*; @@ -343,33 +341,11 @@ enum Kind { Db, Parse, Encode, - MissingUser, - MissingPassword, - UnsupportedAuthentication, Authentication, - ConnectionSyntax, + ConfigParse, + Config, #[cfg(feature = "runtime")] Connect, - #[cfg(feature = "runtime")] - MissingHost, - #[cfg(feature = "runtime")] - 
InvalidPort, - #[cfg(feature = "runtime")] - InvalidPortCount, - #[cfg(feature = "runtime")] - InvalidConnectTimeout, - #[cfg(feature = "runtime")] - InvalidKeepalives, - #[cfg(feature = "runtime")] - InvalidTargetSessionAttrs, - #[cfg(feature = "runtime")] - InvalidKeepalivesIdle, - #[cfg(feature = "runtime")] - Timer, - #[cfg(feature = "runtime")] - ConnectTimeout, - #[cfg(feature = "runtime")] - ReadOnlyDatabase, } struct ErrorInner { @@ -402,33 +378,11 @@ impl fmt::Display for Error { Kind::Db => "db error", Kind::Parse => "error parsing response from server", Kind::Encode => "error encoding message to server", - Kind::MissingUser => "username not provided", - Kind::MissingPassword => "password not provided", - Kind::UnsupportedAuthentication => "unsupported authentication method requested", Kind::Authentication => "authentication error", - Kind::ConnectionSyntax => "invalid connection string", + Kind::ConfigParse => "invalid connection string", + Kind::Config => "invalid configuration", #[cfg(feature = "runtime")] Kind::Connect => "error connecting to server", - #[cfg(feature = "runtime")] - Kind::MissingHost => "host not provided", - #[cfg(feature = "runtime")] - Kind::InvalidPort => "invalid port", - #[cfg(feature = "runtime")] - Kind::InvalidPortCount => "wrong number of ports provided", - #[cfg(feature = "runtime")] - Kind::InvalidConnectTimeout => "invalid connect_timeout", - #[cfg(feature = "runtime")] - Kind::InvalidKeepalives => "invalid keepalives", - #[cfg(feature = "runtime")] - Kind::InvalidKeepalivesIdle => "invalid keepalives_value", - #[cfg(feature = "runtime")] - Kind::InvalidTargetSessionAttrs => "invalid target_session_attrs", - #[cfg(feature = "runtime")] - Kind::Timer => "timer error", - #[cfg(feature = "runtime")] - Kind::ConnectTimeout => "timed out connecting to server", - #[cfg(feature = "runtime")] - Kind::ReadOnlyDatabase => "the database was read-only", }; fmt.write_str(s)?; if let Some(ref cause) = self.0.cause { @@ -504,18 +458,6 @@ impl Error { Error::new(Kind::CopyInStream, Some(e.into())) } - pub(crate) fn missing_user() -> Error { - Error::new(Kind::MissingUser, None) - } - - pub(crate) fn missing_password() -> Error { - Error::new(Kind::MissingPassword, None) - } - - pub(crate) fn unsupported_authentication() -> Error { - Error::new(Kind::UnsupportedAuthentication, None) - } - pub(crate) fn tls(e: Box) -> Error { Error::new(Kind::Tls, Some(e)) } @@ -524,66 +466,20 @@ impl Error { Error::new(Kind::Io, Some(Box::new(e))) } - pub(crate) fn authentication(e: io::Error) -> Error { - Error::new(Kind::Authentication, Some(Box::new(e))) - } - - pub(crate) fn connection_syntax(e: Box) -> Error { - Error::new(Kind::ConnectionSyntax, Some(e)) - } - - #[cfg(feature = "runtime")] - pub(crate) fn connect(e: io::Error) -> Error { - Error::new(Kind::Connect, Some(Box::new(e))) - } - - #[cfg(feature = "runtime")] - pub(crate) fn missing_host() -> Error { - Error::new(Kind::MissingHost, None) + pub(crate) fn authentication(e: Box) -> Error { + Error::new(Kind::Authentication, Some(e)) } - #[cfg(feature = "runtime")] - pub(crate) fn invalid_port(e: ParseIntError) -> Error { - Error::new(Kind::InvalidPort, Some(Box::new(e))) + pub(crate) fn config_parse(e: Box) -> Error { + Error::new(Kind::ConfigParse, Some(e)) } - #[cfg(feature = "runtime")] - pub(crate) fn invalid_port_count() -> Error { - Error::new(Kind::InvalidPortCount, None) - } - - #[cfg(feature = "runtime")] - pub(crate) fn invalid_connect_timeout(e: ParseIntError) -> Error { - 
Error::new(Kind::InvalidConnectTimeout, Some(Box::new(e))) - } - - #[cfg(feature = "runtime")] - pub(crate) fn invalid_keepalives(e: ParseIntError) -> Error { - Error::new(Kind::InvalidKeepalives, Some(Box::new(e))) + pub(crate) fn config(e: Box) -> Error { + Error::new(Kind::Config, Some(e)) } #[cfg(feature = "runtime")] - pub(crate) fn invalid_keepalives_idle(e: ParseIntError) -> Error { - Error::new(Kind::InvalidKeepalivesIdle, Some(Box::new(e))) - } - - #[cfg(feature = "runtime")] - pub(crate) fn invalid_target_session_attrs() -> Error { - Error::new(Kind::InvalidTargetSessionAttrs, None) - } - - #[cfg(feature = "runtime")] - pub(crate) fn timer(e: tokio_timer::Error) -> Error { - Error::new(Kind::Timer, Some(Box::new(e))) - } - - #[cfg(feature = "runtime")] - pub(crate) fn connect_timeout() -> Error { - Error::new(Kind::ConnectTimeout, None) - } - - #[cfg(feature = "runtime")] - pub(crate) fn read_only_database() -> Error { - Error::new(Kind::ReadOnlyDatabase, None) + pub(crate) fn connect(e: io::Error) -> Error { + Error::new(Kind::Connect, Some(Box::new(e))) } } diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index d37f32e5b..123a51e84 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -44,11 +44,11 @@ where let config = state.config?; if config.0.host.is_empty() { - return Err(Error::missing_host()); + return Err(Error::config("host missing".into())); } if config.0.port.len() > 1 && config.0.port.len() != config.0.host.len() { - return Err(Error::invalid_port_count()); + return Err(Error::config("invalid number of ports".into())); } let hostname = match &config.0.host[0] { diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index bf92aa6a2..c6591d1df 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -128,9 +128,11 @@ where ) -> Poll, Error> { if let Some(timeout) = &mut state.timeout { match timeout.poll() { - Ok(Async::Ready(())) => return Err(Error::connect_timeout()), + Ok(Async::Ready(())) => { + return Err(Error::connect(io::Error::from(io::ErrorKind::TimedOut))) + } Ok(Async::NotReady) => {} - Err(e) => return Err(Error::timer(e)), + Err(e) => return Err(Error::connect(io::Error::new(io::ErrorKind::Other, e))), } } @@ -149,9 +151,11 @@ where ) -> Poll, Error> { if let Some(timeout) = &mut state.timeout { match timeout.poll() { - Ok(Async::Ready(())) => return Err(Error::connect_timeout()), + Ok(Async::Ready(())) => { + return Err(Error::connect(io::Error::from(io::ErrorKind::TimedOut))) + } Ok(Async::NotReady) => {} - Err(e) => return Err(Error::timer(e)), + Err(e) => return Err(Error::connect(io::Error::new(io::ErrorKind::Other, e))), } } @@ -182,9 +186,11 @@ where ) -> Poll, Error> { if let Some(timeout) = &mut state.timeout { match timeout.poll() { - Ok(Async::Ready(())) => return Err(Error::connect_timeout()), + Ok(Async::Ready(())) => { + return Err(Error::connect(io::Error::from(io::ErrorKind::TimedOut))) + } Ok(Async::NotReady) => {} - Err(e) => return Err(Error::timer(e)), + Err(e) => return Err(Error::connect(io::Error::new(io::ErrorKind::Other, e))), } } @@ -244,7 +250,10 @@ where match try_ready!(state.stream.poll()) { Some(row) => { if row.get(0) == Some("on") { - Err(Error::read_only_database()) + Err(Error::connect(io::Error::new( + io::ErrorKind::PermissionDenied, + "database does not allow writes", + ))) } else { let state = state.take(); transition!(Finished((state.client, 
state.connection))) diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs index 367905282..b3b1e47b9 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -77,16 +77,23 @@ where let (stream, channel_binding) = try_ready!(state.future.poll()); let state = state.take(); + let mut params = vec![("client_encoding", "UTF8"), ("timezone", "GMT")]; + if let Some(user) = &state.config.0.user { + params.push(("user", &**user)); + } + if let Some(dbname) = &state.config.0.dbname { + params.push(("database", &**dbname)); + } + if let Some(options) = &state.config.0.options { + params.push(("options", &**options)); + } + if let Some(application_name) = &state.config.0.application_name { + params.push(("application_name", &**application_name)); + } + let mut buf = vec![]; - frontend::startup_message( - state.config.0.params.iter().map(|(k, v)| { - // libpq uses dbname, but the backend expects database (!) - let k = if k == "dbname" { "database" } else { &**k }; - (k, &**v) - }), - &mut buf, - ) - .map_err(Error::encode)?; + frontend::startup_message(params.iter().map(|e| (e.0, e.1)), &mut buf) + .map_err(Error::encode)?; let stream = Framed::new(stream, PostgresCodec); @@ -127,7 +134,7 @@ where .0 .password .as_ref() - .ok_or_else(Error::missing_password)?; + .ok_or_else(|| Error::config("password missing".into()))?; let mut buf = vec![]; frontend::password_message(pass, &mut buf).map_err(Error::encode)?; transition!(SendingPassword { @@ -138,15 +145,15 @@ where let user = state .config .0 - .params - .get("user") - .ok_or_else(Error::missing_user)?; + .user + .as_ref() + .ok_or_else(|| Error::config("user missing".into()))?; let pass = state .config .0 .password .as_ref() - .ok_or_else(Error::missing_password)?; + .ok_or_else(|| Error::config("password missing".into()))?; let output = authentication::md5_hash(user.as_bytes(), pass, body.salt()); let mut buf = vec![]; frontend::password_message(output.as_bytes(), &mut buf).map_err(Error::encode)?; @@ -160,7 +167,7 @@ where .0 .password .as_ref() - .ok_or_else(Error::missing_password)?; + .ok_or_else(|| Error::config("password missing".into()))?; let mut has_scram = false; let mut has_scram_plus = false; @@ -194,7 +201,9 @@ where None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), } } else { - return Err(Error::unsupported_authentication()); + return Err(Error::authentication( + "unsupported authentication method".into(), + )); }; let scram = ScramSha256::new(pass, channel_binding); @@ -211,7 +220,9 @@ where Some(Message::AuthenticationKerberosV5) | Some(Message::AuthenticationScmCredential) | Some(Message::AuthenticationGss) - | Some(Message::AuthenticationSspi) => Err(Error::unsupported_authentication()), + | Some(Message::AuthenticationSspi) => Err(Error::authentication( + "unsupported authentication method".into(), + )), Some(Message::ErrorResponse(body)) => Err(Error::db(body)), Some(_) => Err(Error::unexpected_message()), None => Err(Error::closed()), @@ -247,7 +258,7 @@ where state .scram .update(body.data()) - .map_err(Error::authentication)?; + .map_err(|e| Error::authentication(Box::new(e)))?; let mut buf = vec![]; frontend::sasl_response(state.scram.message(), &mut buf).map_err(Error::encode)?; transition!(SendingSasl { @@ -259,7 +270,7 @@ where state .scram .finish(body.data()) - .map_err(Error::authentication)?; + .map_err(|e| Error::authentication(Box::new(e)))?; transition!(ReadingAuthCompletion { stream: state.stream }) diff --git 
a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index 023454566..a7908fb50 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -5,15 +5,15 @@ use tokio_postgres::TargetSessionAttrs; #[test] fn pairs_ok() { - let params = r"user=foo password=' fizz \'buzz\\ ' thing = ''" + let params = r"user=foo password=' fizz \'buzz\\ ' application_name = ''" .parse::() .unwrap(); let mut expected = tokio_postgres::Config::new(); expected - .param("user", "foo") + .user("foo") .password(r" fizz 'buzz\ ") - .param("thing", ""); + .application_name(""); assert_eq!(params, expected); } @@ -25,7 +25,7 @@ fn pairs_ws() { .unwrap(); let mut expected = tokio_postgres::Config::new(); - expected.param("user", "foo").password("hunter2"); + expected.user("foo").password("hunter2"); assert_eq!(params, expected); } diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 29df4d8c7..bdcd98d6c 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -61,7 +61,8 @@ fn target_session_attrs_ok() { fn target_session_attrs_err() { let mut runtime = Runtime::new().unwrap(); let f = tokio_postgres::connect( - "host=localhost port=5433 user=postgres target_session_attrs=read-write default_transaction_read_only=on", + "host=localhost port=5433 user=postgres target_session_attrs=read-write + options='-c default_transaction_read_only=on'", NoTls, ); runtime.block_on(f).err().unwrap(); From 272f1a88b9a174115e0943b6c215ae3db6872ffa Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 30 Dec 2018 20:12:17 -0800 Subject: [PATCH 093/819] Fix warning --- tokio-postgres/src/config.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 288601034..ad03ba7b5 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -294,14 +294,17 @@ impl fmt::Display for UnknownOption { impl error::Error for UnknownOption {} #[derive(Debug)] +#[cfg(feature = "runtime")] struct InvalidValue(&'static str); +#[cfg(feature = "runtime")] impl fmt::Display for InvalidValue { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "invalid value for option `{}`", self.0) } } +#[cfg(feature = "runtime")] impl error::Error for InvalidValue {} struct Parser<'a> { From 8dcad81224a0beced3dd3c24609f9fe4bb13332a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 31 Dec 2018 19:17:04 -0800 Subject: [PATCH 094/819] Support URL-style configuration strings --- tokio-postgres/Cargo.toml | 1 + tokio-postgres/src/config.rs | 382 ++++++++++++++++++++++------- tokio-postgres/tests/test/parse.rs | 138 ++++++++--- 3 files changed, 401 insertions(+), 120 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f058446a8..35a121e6a 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -43,6 +43,7 @@ bytes = "0.4" fallible-iterator = "0.1.6" futures = "0.1.7" log = "0.4" +percent-encoding = "1.0" phf = "0.7.23" postgres-protocol = { version = "0.3.0", path = "../postgres-protocol" } state_machine_future = "0.1.7" diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index ad03ba7b5..38dfb7558 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1,6 +1,12 @@ +use std::borrow::Cow; use std::error; +#[cfg(all(feature = "runtime", unix))] +use std::ffi::OsStr; use std::fmt; use std::iter; +use std::mem; +#[cfg(all(feature = "runtime", 
unix))] +use std::os::unix::ffi::OsStrExt; #[cfg(all(feature = "runtime", unix))] use std::path::{Path, PathBuf}; use std::str::{self, FromStr}; @@ -173,6 +179,89 @@ impl Config { self } + fn param(&mut self, key: &str, value: &str) -> Result<(), Error> { + match key { + "user" => { + self.user(&value); + } + "password" => { + self.password(value); + } + "dbname" => { + self.dbname(&value); + } + "options" => { + self.options(&value); + } + "application_name" => { + self.application_name(&value); + } + #[cfg(feature = "runtime")] + "host" => { + for host in value.split(',') { + self.host(host); + } + } + #[cfg(feature = "runtime")] + "port" => { + for port in value.split(',') { + let port = if port.is_empty() { + 5432 + } else { + port.parse() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("port"))))? + }; + self.port(port); + } + } + #[cfg(feature = "runtime")] + "connect_timeout" => { + let timeout = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("connect_timeout"))))?; + if timeout > 0 { + self.connect_timeout(Duration::from_secs(timeout as u64)); + } + } + #[cfg(feature = "runtime")] + "keepalives" => { + let keepalives = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives"))))?; + self.keepalives(keepalives != 0); + } + #[cfg(feature = "runtime")] + "keepalives_idle" => { + let keepalives_idle = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives_idle"))))?; + if keepalives_idle > 0 { + self.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); + } + } + #[cfg(feature = "runtime")] + "target_session_attrs" => { + let target_session_attrs = match &*value { + "any" => TargetSessionAttrs::Any, + "read-write" => TargetSessionAttrs::ReadWrite, + _ => { + return Err(Error::config_parse(Box::new(InvalidValue( + "target_session_attrs", + )))); + } + }; + self.target_session_attrs(target_session_attrs); + } + key => { + return Err(Error::config_parse(Box::new(UnknownOption( + key.to_string(), + )))); + } + } + + Ok(()) + } + pub fn handshake(&self, stream: S, tls_mode: T) -> Handshake where S: AsyncRead + AsyncWrite, @@ -194,91 +283,10 @@ impl FromStr for Config { type Err = Error; fn from_str(s: &str) -> Result { - let mut parser = Parser::new(s); - let mut builder = Config::new(); - - while let Some((key, value)) = parser.parameter()? { - match key { - "user" => { - builder.user(&value); - } - "password" => { - builder.password(value); - } - "dbname" => { - builder.dbname(&value); - } - "options" => { - builder.options(&value); - } - "application_name" => { - builder.application_name(&value); - } - #[cfg(feature = "runtime")] - "host" => { - for host in value.split(',') { - builder.host(host); - } - } - #[cfg(feature = "runtime")] - "port" => { - for port in value.split(',') { - let port = if port.is_empty() { - 5432 - } else { - port.parse() - .map_err(|_| Error::config_parse(Box::new(InvalidValue("port"))))? 
- }; - builder.port(port); - } - } - #[cfg(feature = "runtime")] - "connect_timeout" => { - let timeout = value.parse::().map_err(|_| { - Error::config_parse(Box::new(InvalidValue("connect_timeout"))) - })?; - if timeout > 0 { - builder.connect_timeout(Duration::from_secs(timeout as u64)); - } - } - #[cfg(feature = "runtime")] - "keepalives" => { - let keepalives = value - .parse::() - .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives"))))?; - builder.keepalives(keepalives != 0); - } - #[cfg(feature = "runtime")] - "keepalives_idle" => { - let keepalives_idle = value.parse::().map_err(|_| { - Error::config_parse(Box::new(InvalidValue("keepalives_idle"))) - })?; - if keepalives_idle > 0 { - builder.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); - } - } - #[cfg(feature = "runtime")] - "target_session_attrs" => { - let target_session_attrs = match &*value { - "any" => TargetSessionAttrs::Any, - "read-write" => TargetSessionAttrs::ReadWrite, - _ => { - return Err(Error::config_parse(Box::new(InvalidValue( - "target_session_attrs", - )))) - } - }; - builder.target_session_attrs(target_session_attrs); - } - key => { - return Err(Error::config_parse(Box::new(UnknownOption( - key.to_string(), - )))) - } - } + match UrlParser::parse(s)? { + Some(config) => Ok(config), + None => Parser::parse(s), } - - Ok(builder) } } @@ -294,17 +302,14 @@ impl fmt::Display for UnknownOption { impl error::Error for UnknownOption {} #[derive(Debug)] -#[cfg(feature = "runtime")] struct InvalidValue(&'static str); -#[cfg(feature = "runtime")] impl fmt::Display for InvalidValue { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "invalid value for option `{}`", self.0) } } -#[cfg(feature = "runtime")] impl error::Error for InvalidValue {} struct Parser<'a> { @@ -313,11 +318,19 @@ struct Parser<'a> { } impl<'a> Parser<'a> { - fn new(s: &'a str) -> Parser<'a> { - Parser { + fn parse(s: &'a str) -> Result { + let mut parser = Parser { s, it: s.char_indices().peekable(), + }; + + let mut config = Config::new(); + + while let Some((key, value)) = parser.parameter()? 
{ + config.param(key, &value)?; } + + Ok(config) } fn skip_ws(&mut self) { @@ -456,3 +469,194 @@ impl<'a> Parser<'a> { Ok(Some((keyword, value))) } } + +// This is a pretty sloppy "URL" parser, but it matches the behavior of libpq, where things really aren't very strict +struct UrlParser<'a> { + s: &'a str, + config: Config, +} + +impl<'a> UrlParser<'a> { + fn parse(s: &'a str) -> Result, Error> { + let s = match Self::remove_url_prefix(s) { + Some(s) => s, + None => return Ok(None), + }; + + let mut parser = UrlParser { + s, + config: Config::new(), + }; + + parser.parse_credentials()?; + parser.parse_host()?; + parser.parse_path()?; + parser.parse_params()?; + + Ok(Some(parser.config)) + } + + fn remove_url_prefix(s: &str) -> Option<&str> { + for prefix in &["postgres://", "postgresql://"] { + if s.starts_with(prefix) { + return Some(&s[prefix.len()..]); + } + } + + None + } + + fn take_until(&mut self, end: &[char]) -> Option<&'a str> { + match self.s.find(end) { + Some(pos) => { + let (head, tail) = self.s.split_at(pos); + self.s = tail; + Some(head) + } + None => None, + } + } + + fn take_all(&mut self) -> &'a str { + mem::replace(&mut self.s, "") + } + + fn eat_byte(&mut self) { + self.s = &self.s[1..]; + } + + fn parse_credentials(&mut self) -> Result<(), Error> { + let creds = match self.take_until(&['@']) { + Some(creds) => creds, + None => return Ok(()), + }; + self.eat_byte(); + + let mut it = creds.splitn(2, ':'); + let user = self.decode(it.next().unwrap())?; + self.config.user(&user); + + if let Some(password) = it.next() { + let password = Cow::from(percent_encoding::percent_decode(password.as_bytes())); + self.config.password(password); + } + + Ok(()) + } + + fn parse_host(&mut self) -> Result<(), Error> { + let host = match self.take_until(&['/', '?']) { + Some(host) => host, + None => self.take_all(), + }; + + if host.is_empty() { + return Ok(()); + } + + for chunk in host.split(',') { + let (host, port) = if chunk.starts_with('[') { + let idx = match chunk.find(']') { + Some(idx) => idx, + None => return Err(Error::config_parse(InvalidValue("host").into())), + }; + + let host = &chunk[1..idx]; + let remaining = &chunk[idx + 1..]; + let port = if remaining.starts_with(':') { + Some(&remaining[1..]) + } else if remaining.is_empty() { + None + } else { + return Err(Error::config_parse(InvalidValue("host").into())); + }; + + (host, port) + } else { + let mut it = chunk.splitn(2, ':'); + (it.next().unwrap(), it.next()) + }; + + self.host_param(host)?; + let port = self.decode(port.unwrap_or("5432"))?; + self.config.param("port", &port)?; + } + + Ok(()) + } + + fn parse_path(&mut self) -> Result<(), Error> { + if !self.s.starts_with('/') { + return Ok(()); + } + self.eat_byte(); + + let dbname = match self.take_until(&['?']) { + Some(dbname) => dbname, + None => self.take_all(), + }; + + if !dbname.is_empty() { + self.config.dbname(&self.decode(dbname)?); + } + + Ok(()) + } + + fn parse_params(&mut self) -> Result<(), Error> { + if !self.s.starts_with('?') { + return Ok(()); + } + self.eat_byte(); + + while !self.s.is_empty() { + let key = match self.take_until(&['=']) { + Some(key) => self.decode(key)?, + None => return Err(Error::config_parse("unterminated parameter".into())), + }; + self.eat_byte(); + + let value = match self.take_until(&['&']) { + Some(value) => { + self.eat_byte(); + value + } + None => self.take_all(), + }; + + if key == "host" { + self.host_param(value)?; + } else { + let value = self.decode(value)?; + self.config.param(&key, &value)?; + } + } + + 
Ok(()) + } + + #[cfg(all(feature = "runtime", unix))] + fn host_param(&mut self, s: &str) -> Result<(), Error> { + let decoded = Cow::from(percent_encoding::percent_decode(s.as_bytes())); + if decoded.get(0) == Some(&b'/') { + self.config.host_path(OsStr::from_bytes(&decoded)); + } else { + let decoded = str::from_utf8(&decoded).map_err(|e| Error::config_parse(Box::new(e)))?; + self.config.host(decoded); + } + + Ok(()) + } + + #[cfg(not(all(feature = "runtime", unix)))] + fn host_param(&mut self, s: &str) -> Result<(), Error> { + let s = self.decode(s)?; + self.config.param("host", &s) + } + + fn decode(&self, s: &'a str) -> Result, Error> { + percent_encoding::percent_decode(s.as_bytes()) + .decode_utf8() + .map_err(|e| Error::config_parse(e.into())) + } +} diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index a7908fb50..0e94e33c8 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,49 +1,125 @@ #[cfg(feature = "runtime")] use std::time::Duration; +use tokio_postgres::Config; #[cfg(feature = "runtime")] use tokio_postgres::TargetSessionAttrs; +fn check(s: &str, config: &Config) { + assert_eq!(s.parse::().expect(s), *config, "`{}`", s); +} + #[test] fn pairs_ok() { - let params = r"user=foo password=' fizz \'buzz\\ ' application_name = ''" - .parse::() - .unwrap(); - - let mut expected = tokio_postgres::Config::new(); - expected - .user("foo") - .password(r" fizz 'buzz\ ") - .application_name(""); - - assert_eq!(params, expected); + check( + r"user=foo password=' fizz \'buzz\\ ' application_name = ''", + Config::new() + .user("foo") + .password(r" fizz 'buzz\ ") + .application_name(""), + ); } #[test] fn pairs_ws() { - let params = " user\t=\r\n\x0bfoo \t password = hunter2 " - .parse::() - .unwrap(); - - let mut expected = tokio_postgres::Config::new(); - expected.user("foo").password("hunter2"); - - assert_eq!(params, expected); + check( + " user\t=\r\n\x0bfoo \t password = hunter2 ", + Config::new().user("foo").password("hunter2"), + ); } #[test] #[cfg(feature = "runtime")] fn settings() { - let params = - "connect_timeout=3 keepalives=0 keepalives_idle=30 target_session_attrs=read-write" - .parse::() - .unwrap(); - - let mut expected = tokio_postgres::Config::new(); - expected - .connect_timeout(Duration::from_secs(3)) - .keepalives(false) - .keepalives_idle(Duration::from_secs(30)) - .target_session_attrs(TargetSessionAttrs::ReadWrite); + check( + "connect_timeout=3 keepalives=0 keepalives_idle=30 target_session_attrs=read-write", + Config::new() + .connect_timeout(Duration::from_secs(3)) + .keepalives(false) + .keepalives_idle(Duration::from_secs(30)) + .target_session_attrs(TargetSessionAttrs::ReadWrite), + ); +} - assert_eq!(params, expected); +#[test] +#[cfg(feature = "runtime")] +fn url() { + check("postgresql://", &Config::new()); + check( + "postgresql://localhost", + Config::new().host("localhost").port(5432), + ); + check( + "postgresql://localhost:5433", + Config::new().host("localhost").port(5433), + ); + check( + "postgresql://localhost/mydb", + Config::new().host("localhost").port(5432).dbname("mydb"), + ); + check( + "postgresql://user@localhost", + Config::new().user("user").host("localhost").port(5432), + ); + check( + "postgresql://user:secret@localhost", + Config::new() + .user("user") + .password("secret") + .host("localhost") + .port(5432), + ); + check( + "postgresql://other@localhost/otherdb?connect_timeout=10&application_name=myapp", + Config::new() + .user("other") + 
.host("localhost") + .port(5432) + .dbname("otherdb") + .connect_timeout(Duration::from_secs(10)) + .application_name("myapp"), + ); + check( + "postgresql://host1:123,host2:456/somedb?target_session_attrs=any&application_name=myapp", + Config::new() + .host("host1") + .port(123) + .host("host2") + .port(456) + .dbname("somedb") + .target_session_attrs(TargetSessionAttrs::Any) + .application_name("myapp"), + ); + check( + "postgresql:///mydb?host=localhost&port=5433", + Config::new().dbname("mydb").host("localhost").port(5433), + ); + check( + "postgresql://[2001:db8::1234]/database", + Config::new() + .host("2001:db8::1234") + .port(5432) + .dbname("database"), + ); + check( + "postgresql://[2001:db8::1234]:5433/database", + Config::new() + .host("2001:db8::1234") + .port(5433) + .dbname("database"), + ); + #[cfg(unix)] + check( + "postgresql:///dbname?host=/var/lib/postgresql", + Config::new() + .dbname("dbname") + .host_path("/var/lib/postgresql"), + ); + #[cfg(unix)] + check( + "postgresql://%2Fvar%2Flib%2Fpostgresql/dbname", + Config::new() + .host_path("/var/lib/postgresql") + .port(5432) + .dbname("dbname"), + ) } From 9e25e47fe0abf022044ae9616b3b0ddbaf91be9c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 1 Jan 2019 12:20:41 -0800 Subject: [PATCH 095/819] cleanup --- tokio-postgres/src/proto/connect_once.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index c6591d1df..76ed9c422 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -6,8 +6,6 @@ use lazy_static::lazy_static; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; use std::net::{SocketAddr, ToSocketAddrs}; -#[cfg(unix)] -use std::path::Path; use std::time::Instant; use std::vec; use tokio_tcp::TcpStream; @@ -111,7 +109,7 @@ where } #[cfg(unix)] Host::Unix(host) => { - let path = Path::new(host).join(format!(".s.PGSQL.{}", port)); + let path = host.join(format!(".s.PGSQL.{}", port)); transition!(ConnectingUnix { future: UnixStream::connect(path), timeout, From b2f1d514510c005d4ba5cf14337a29baa8a68941 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 3 Jan 2019 21:14:16 -0800 Subject: [PATCH 096/819] cleanup --- tokio-postgres/src/proto/handshake.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs index b3b1e47b9..6b27f0eae 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -92,8 +92,7 @@ where } let mut buf = vec![]; - frontend::startup_message(params.iter().map(|e| (e.0, e.1)), &mut buf) - .map_err(Error::encode)?; + frontend::startup_message(params, &mut buf).map_err(Error::encode)?; let stream = Framed::new(stream, PostgresCodec); From b55c2ea3c499e2d607d20e35cca72c51d9f02e77 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 4 Jan 2019 16:57:15 -0800 Subject: [PATCH 097/819] Regenerate phf map for new release --- tokio-postgres/src/error/sqlstate.rs | 516 +++++++++++++-------------- 1 file changed, 258 insertions(+), 258 deletions(-) diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index dc33991fd..27ee63530 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -777,299 +777,299 @@ impl SqlState { } #[cfg_attr(rustfmt, rustfmt_skip)] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = 
::phf::Map { - key: 6246114685207409605, + key: 3213172566270843353, disps: ::phf::Slice::Static(&[ - (1, 77), - (0, 21), + (1, 16), + (1, 97), + (0, 146), + (0, 0), + (1, 0), + (0, 217), + (3, 134), + (0, 2), (0, 6), - (0, 13), - (0, 28), - (0, 30), - (0, 1), - (0, 47), - (0, 5), + (0, 32), + (1, 99), + (1, 227), + (0, 6), + (0, 163), + (0, 89), (0, 5), - (0, 52), - (0, 96), - (0, 66), - (3, 167), - (0, 10), - (0, 56), - (2, 159), - (0, 28), + (0, 3), + (0, 200), + (4, 99), + (0, 32), + (0, 19), + (0, 82), + (0, 54), + (1, 126), (0, 11), - (0, 6), + (0, 83), + (6, 102), + (0, 67), + (4, 162), + (0, 13), + (0, 116), + (11, 57), + (0, 210), + (0, 4), + (4, 127), + (1, 133), + (1, 158), + (0, 180), + (2, 201), + (0, 148), + (4, 135), + (0, 5), + (1, 1), + (0, 0), + (0, 191), + (0, 171), + (7, 38), (0, 0), - (1, 188), - (7, 122), - (2, 32), - (1, 6), - (1, 142), - (1, 0), - (1, 71), - (2, 35), - (0, 1), - (0, 68), - (0, 66), - (5, 43), (0, 0), - (1, 0), - (2, 123), - (1, 1), - (0, 31), - (3, 7), - (3, 172), - (0, 35), - (0, 107), - (0, 106), - (0, 102), - (17, 207), - (6, 8), - (0, 105), - (26, 87), - (0, 21), ]), entries: ::phf::Slice::Static(&[ - ("42P16", SqlState::INVALID_TABLE_DEFINITION), - ("428C9", SqlState::GENERATED_ALWAYS), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), - ("42622", SqlState::NAME_TOO_LONG), - ("42804", SqlState::DATATYPE_MISMATCH), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), - ("22000", SqlState::DATA_EXCEPTION), - ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("42704", SqlState::UNDEFINED_OBJECT), - ("42703", SqlState::UNDEFINED_COLUMN), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("23505", SqlState::UNIQUE_VIOLATION), - ("42712", SqlState::DUPLICATE_ALIAS), - ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("20000", SqlState::CASE_NOT_FOUND), - ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("42939", SqlState::RESERVED_NAME), - ("42P12", SqlState::INVALID_DATABASE_DEFINITION), - ("42P20", SqlState::WINDOWING_ERROR), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), - ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("54011", SqlState::TOO_MANY_COLUMNS), - ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("42701", SqlState::DUPLICATE_COLUMN), - ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), ("22022", SqlState::INDICATOR_OVERFLOW), + ("3B000", SqlState::SAVEPOINT_EXCEPTION), + ("54023", SqlState::TOO_MANY_ARGUMENTS), + ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), + ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), - ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("22019", SqlState::INVALID_ESCAPE_CHARACTER), + ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("42622", 
SqlState::NAME_TOO_LONG), ("42P19", SqlState::INVALID_RECURSION), - ("XX000", SqlState::INTERNAL_ERROR), - ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("F0000", SqlState::CONFIG_FILE_ERROR), ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("57P04", SqlState::DATABASE_DROPPED), - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("22011", SqlState::SUBSTRING_ERROR), - ("42P21", SqlState::COLLATION_MISMATCH), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("23502", SqlState::NOT_NULL_VIOLATION), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - ("0L000", SqlState::INVALID_GRANTOR), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("42P03", SqlState::DUPLICATE_CURSOR), - ("P0004", SqlState::ASSERT_FAILURE), - ("P0003", SqlState::TOO_MANY_ROWS), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), - ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("57000", SqlState::OPERATOR_INTERVENTION), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), - ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("42710", SqlState::DUPLICATE_OBJECT), + ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), + ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), + ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("58000", SqlState::SYSTEM_ERROR), - ("42P07", SqlState::DUPLICATE_TABLE), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("3F000", SqlState::INVALID_SCHEMA_NAME), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), + ("08000", SqlState::CONNECTION_EXCEPTION), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), + ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), + ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), + ("53200", SqlState::OUT_OF_MEMORY), + ("22P01", SqlState::FLOATING_POINT_EXCEPTION), + ("3D000", SqlState::INVALID_CATALOG_NAME), + ("42702", SqlState::AMBIGUOUS_COLUMN), + ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("08006", 
SqlState::CONNECTION_FAILURE), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("P0004", SqlState::ASSERT_FAILURE), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), + ("57014", SqlState::QUERY_CANCELED), + ("57P01", SqlState::ADMIN_SHUTDOWN), ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("42P22", SqlState::INDETERMINATE_COLLATION), + ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), ("54001", SqlState::STATEMENT_TOO_COMPLEX), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("22005", SqlState::ERROR_IN_ASSIGNMENT), + ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), + ("0L000", SqlState::INVALID_GRANTOR), + ("55P03", SqlState::LOCK_NOT_AVAILABLE), + ("HV00P", SqlState::FDW_NO_SCHEMAS), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), + ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), ("42P02", SqlState::UNDEFINED_PARAMETER), - ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("22023", SqlState::INVALID_PARAMETER_VALUE), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("08P01", SqlState::PROTOCOL_VIOLATION), - ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("42P01", SqlState::UNDEFINED_TABLE), - ("P0002", SqlState::NO_DATA_FOUND), - ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("F0001", SqlState::LOCK_FILE_EXISTS), ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("P0000", SqlState::PLPGSQL_ERROR), - ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), - ("42P18", SqlState::INDETERMINATE_DATATYPE), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), + ("25000", SqlState::INVALID_TRANSACTION_STATE), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + ("42725", SqlState::AMBIGUOUS_FUNCTION), + ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), + ("42P09", SqlState::AMBIGUOUS_ALIAS), + ("42939", SqlState::RESERVED_NAME), + ("57P02", SqlState::CRASH_SHUTDOWN), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), + ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), + ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), ("XX002", SqlState::INDEX_CORRUPTED), - ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), + ("42P08", SqlState::AMBIGUOUS_PARAMETER), + ("42602", SqlState::INVALID_NAME), + ("55006", SqlState::OBJECT_IN_USE), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), + ("0F000", SqlState::LOCATOR_EXCEPTION), + ("02000", SqlState::NO_DATA), + ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("24000", SqlState::INVALID_CURSOR_STATE), + ("P0002", SqlState::NO_DATA_FOUND), + ("P0003", SqlState::TOO_MANY_ROWS), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), + ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("42803", SqlState::GROUPING_ERROR), ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("2200T", 
SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), + ("2200M", SqlState::INVALID_XML_DOCUMENT), ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), + ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), + ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), + ("42883", SqlState::UNDEFINED_FUNCTION), + ("23P01", SqlState::EXCLUSION_VIOLATION), + ("F0001", SqlState::LOCK_FILE_EXISTS), + ("0A000", SqlState::FEATURE_NOT_SUPPORTED), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("42846", SqlState::CANNOT_COERCE), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("53100", SqlState::DISK_FULL), + ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), + ("58P02", SqlState::DUPLICATE_FILE), + ("P0001", SqlState::RAISE_EXCEPTION), + ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), + ("22P04", SqlState::BAD_COPY_FILE_FORMAT), + ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), + ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("42601", SqlState::SYNTAX_ERROR), + ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), ("2202E", SqlState::ARRAY_ELEMENT_ERROR), - ("22027", SqlState::TRIM_ERROR), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), + ("58000", SqlState::SYSTEM_ERROR), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), ("23514", SqlState::CHECK_VIOLATION), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("42809", SqlState::WRONG_OBJECT_TYPE), + ("20000", SqlState::CASE_NOT_FOUND), + ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("HV00K", SqlState::FDW_REPLY_HANDLE), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("42830", SqlState::INVALID_FOREIGN_KEY), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), + ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("XX001", SqlState::DATA_CORRUPTED), + ("54011", SqlState::TOO_MANY_COLUMNS), + ("57P04", SqlState::DATABASE_DROPPED), + ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), + ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), + ("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("42701", SqlState::DUPLICATE_COLUMN), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("2200L", SqlState::NOT_AN_XML_DOCUMENT), + ("22011", SqlState::SUBSTRING_ERROR), + ("42P20", SqlState::WINDOWING_ERROR), + ("42P12", SqlState::INVALID_DATABASE_DEFINITION), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("22016", 
SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("23001", SqlState::RESTRICT_VIOLATION), + ("428C9", SqlState::GENERATED_ALWAYS), + ("42723", SqlState::DUPLICATE_FUNCTION), + ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), + ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("42P07", SqlState::DUPLICATE_TABLE), + ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), + ("08P01", SqlState::PROTOCOL_VIOLATION), + ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("XX000", SqlState::INTERNAL_ERROR), ("42P17", SqlState::INVALID_OBJECT_DEFINITION), - ("57P01", SqlState::ADMIN_SHUTDOWN), - ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), ("2200N", SqlState::INVALID_XML_CONTENT), - ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), - ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("58P02", SqlState::DUPLICATE_FILE), + ("23502", SqlState::NOT_NULL_VIOLATION), + ("HV00B", SqlState::FDW_INVALID_HANDLE), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("F0000", SqlState::CONFIG_FILE_ERROR), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("42P04", SqlState::DUPLICATE_DATABASE), + ("42P06", SqlState::DUPLICATE_SCHEMA), ("28P01", SqlState::INVALID_PASSWORD), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("57014", SqlState::QUERY_CANCELED), - ("2200M", SqlState::INVALID_XML_DOCUMENT), - ("23P01", SqlState::EXCLUSION_VIOLATION), - ("42602", SqlState::INVALID_NAME), - ("23001", SqlState::RESTRICT_VIOLATION), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("HV000", SqlState::FDW_ERROR), - ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - ("53200", SqlState::OUT_OF_MEMORY), - ("3D000", SqlState::INVALID_CATALOG_NAME), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("57000", SqlState::OPERATOR_INTERVENTION), + ("P0000", SqlState::PLPGSQL_ERROR), + ("42712", SqlState::DUPLICATE_ALIAS), + ("22019", SqlState::INVALID_ESCAPE_CHARACTER), + ("22012", SqlState::DIVISION_BY_ZERO), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), + ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("0F001", SqlState::L_E_INVALID_SPECIFICATION), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), + ("42804", SqlState::DATATYPE_MISMATCH), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), ("2200S", SqlState::INVALID_XML_COMMENT), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("22P06", 
SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("3B001", SqlState::S_E_INVALID_SPECIFICATION), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("21000", SqlState::CARDINALITY_VIOLATION), + ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("42710", SqlState::DUPLICATE_OBJECT), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("42611", SqlState::INVALID_COLUMN_DEFINITION), ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), + ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("42P16", SqlState::INVALID_TABLE_DEFINITION), + ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("22007", SqlState::INVALID_DATETIME_FORMAT), + ("22005", SqlState::ERROR_IN_ASSIGNMENT), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("HV000", SqlState::FDW_ERROR), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), + ("23505", SqlState::UNIQUE_VIOLATION), + ("42703", SqlState::UNDEFINED_COLUMN), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("01000", SqlState::WARNING), + ("42P01", SqlState::UNDEFINED_TABLE), + ("22027", SqlState::TRIM_ERROR), ("00000", SqlState::SUCCESSFUL_COMPLETION), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), + ("22008", SqlState::DATETIME_FIELD_OVERFLOW), + ("42P21", SqlState::COLLATION_MISMATCH), ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("P0001", SqlState::RAISE_EXCEPTION), - ("58030", SqlState::IO_ERROR), - ("XX001", SqlState::DATA_CORRUPTED), - ("42723", SqlState::DUPLICATE_FUNCTION), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), - ("42601", SqlState::SYNTAX_ERROR), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("53100", SqlState::DISK_FULL), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("55006", SqlState::OBJECT_IN_USE), - ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("57P02", SqlState::CRASH_SHUTDOWN), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), + ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("42P04", SqlState::DUPLICATE_DATABASE), + ("40000", 
SqlState::TRANSACTION_ROLLBACK), + ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), + ("58P01", SqlState::UNDEFINED_FILE), + ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), + ("42704", SqlState::UNDEFINED_OBJECT), + ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("42P22", SqlState::INDETERMINATE_COLLATION), + ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), + ("34000", SqlState::INVALID_CURSOR_NAME), ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), + ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("42846", SqlState::CANNOT_COERCE), - ("08006", SqlState::CONNECTION_FAILURE), - ("42830", SqlState::INVALID_FOREIGN_KEY), ("57P03", SqlState::CANNOT_CONNECT_NOW), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("01000", SqlState::WARNING), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("02000", SqlState::NO_DATA), - ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("58P01", SqlState::UNDEFINED_FILE), - ("22012", SqlState::DIVISION_BY_ZERO), - ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("42803", SqlState::GROUPING_ERROR), - ("3F000", SqlState::INVALID_SCHEMA_NAME), + ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), + ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("22000", SqlState::DATA_EXCEPTION), + ("58030", SqlState::IO_ERROR), + ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), + ("42P03", SqlState::DUPLICATE_CURSOR), + ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), ]), }; From 0ae7670e05a1c1c323a57869ab85c8dc87778389 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 5 Jan 2019 21:39:08 -0800 Subject: [PATCH 098/819] tokio-postgres crate root docs --- tokio-postgres/src/lib.rs | 108 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 0e491480d..b42774a64 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -1,3 +1,111 @@ +//! An asynchronous, pipelined, PostgreSQL client. +//! +//! # Example +//! +//! ```no_run +//! use futures::{Future, Stream}; +//! use tokio_postgres::NoTls; +//! +//! # #[cfg(not(feature = "runtime"))] +//! # let fut = futures::future::ok(()); +//! # #[cfg(feature = "runtime")] +//! let fut = +//! // Connect to the database +//! tokio_postgres::connect("host=localhost user=postgres", NoTls) +//! +//! .map(|(client, connection)| { +//! // The connection object performs the actual communication with the database, +//! // so spawn it off to run on its own. +//! let connection = connection.map_err(|e| eprintln!("connection error: {}", e)); +//! tokio::spawn(connection); +//! +//! // The client is what you use to make requests. +//! client +//! }) +//! +//! .and_then(|mut client| { +//! 
// Now we can prepare a simple statement that just returns its parameter. +//! client.prepare("SELECT $1::TEXT") +//! .map(|statement| (client, statement)) +//! }) +//! +//! .and_then(|(mut client, statement)| { +//! // And then execute it, returning a Stream of Rows which we collect into a Vec +//! client.query(&statement, &[&"hello world"]).collect() +//! }) +//! +//! // Now we can check that we got back the same string we sent over. +//! .map(|rows| { +//! let value: &str = rows[0].get(0); +//! assert_eq!(value, "hello world"); +//! }) +//! +//! // And report any errors that happened. +//! .map_err(|e| { +//! eprintln!("error: {}", e); +//! }); +//! +//! // By default, tokio_postgres uses the tokio crate as its runtime. +//! tokio::run(fut); +//! ``` +//! +//! # Pipelining +//! +//! The client supports *pipelined* requests. Pipelining can improve performance in use cases in which multiple, +//! independent queries need to be executed. In a traditional workflow, each query is sent to the server after the +//! previous query completes. In contrast, pipelining allows the client to send all of the queries to the server up +//! front, eliminating time spent on both sides waiting for the other to finish sending data: +//! +//! ```not_rust +//! Sequential Pipelined +//! | Client | Server | | Client | Server | +//! |----------------|-----------------| |----------------|-----------------| +//! | send query 1 | | | send query 1 | | +//! | | process query 1 | | send query 2 | process query 1 | +//! | receive rows 1 | | | send query 3 | process query 2 | +//! | send query 2 | | | receive rows 1 | process query 3 | +//! | | process query 2 | | receive rows 2 | | +//! | receive rows 2 | | | receive rows 3 | | +//! | send query 3 | | +//! | | process query 3 | +//! | receive rows 3 | | +//! ``` +//! +//! In both cases, the PostgreSQL server is executing the queries sequentially - pipelining just allows both sides of +//! the connection to work concurrently when possible. +//! +//! Pipelining happens automatically when futures are polled concurrently (for example, by using the futures `join` +//! combinator). Say we want to prepare 2 statements: +//! +//! ```no_run +//! use futures::Future; +//! use tokio_postgres::{Client, Error, Statement}; +//! +//! fn prepare_sequential( +//! client: &mut Client, +//! ) -> impl Future +//! { +//! client.prepare("SELECT * FROM foo") +//! .and_then({ +//! let f = client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)"); +//! |s1| f.map(|s2| (s1, s2)) +//! }) +//! } +//! +//! fn prepare_pipelined( +//! client: &mut Client, +//! ) -> impl Future +//! { +//! client.prepare("SELECT * FROM foo") +//! .join(client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)")) +//! } +//! ``` +//! +//! # Runtime +//! +//! The client works with arbitrary `AsyncRead + AsyncWrite` streams. Convenience APIs are provided to handle the +//! connection process, but these are gated by the `runtime` Cargo feature, which is enabled by default. If disabled, +//! all dependence on the tokio runtime is removed. #![warn(rust_2018_idioms, clippy::all)] use bytes::{Bytes, IntoBuf}; From 940cbb8d4b9ba5308e84f92072ad6ad643f6cb1f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 5 Jan 2019 22:03:13 -0800 Subject: [PATCH 099/819] Remove future from MakeTlsMode It's unlikely to be useful in practice, and just introduces more complexity. 
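Concretely, the constructor goes from returning an associated future to returning a Result directly. A rough sketch of the resulting shape (a simplified stand-in; the generic parameters, stream types, and trait bounds of the real trait are omitted here — see the tokio-postgres/src/tls.rs hunk below for the actual definition):

    // Illustrative sketch only, not the crate's exact trait.
    // Previously the trait also carried an associated `type Future` and
    // `make_tls_mode` returned `Self::Future`; after this patch the TlsMode
    // is produced synchronously as a plain `Result`.
    pub trait MakeTlsModeSketch {
        type TlsMode;
        type Error;

        fn make_tls_mode(&mut self, domain: &str) -> Result<Self::TlsMode, Self::Error>;
    }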
--- postgres/src/client.rs | 1 - postgres/src/config.rs | 1 - tokio-postgres-openssl/src/lib.rs | 15 ++---- tokio-postgres/src/proto/connect.rs | 81 ++++++++++++----------------- tokio-postgres/src/tls.rs | 53 +++---------------- 5 files changed, 44 insertions(+), 107 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 880523db4..7c5ff34c0 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -18,7 +18,6 @@ impl Client { T: MakeTlsMode + 'static + Send, T::TlsMode: Send, T::Stream: Send, - T::Future: Send, >::Future: Send, { params.parse::()?.connect(tls_mode) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b6b3047d1..4c6147b73 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -97,7 +97,6 @@ impl Config { T: MakeTlsMode + 'static + Send, T::TlsMode: Send, T::Stream: Send, - T::Future: Send, >::Future: Send, { let connect = self.0.connect(tls_mode); diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index 83b7c4c35..f635e1b1b 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -1,7 +1,5 @@ #![warn(rust_2018_idioms, clippy::all)] -#[cfg(feature = "runtime")] -use futures::future::{self, FutureResult}; use futures::{try_ready, Async, Future, Poll}; #[cfg(feature = "runtime")] use openssl::error::ErrorStack; @@ -44,12 +42,6 @@ impl MakeTlsConnector { { self.config = Arc::new(f); } - - fn make_tls_connect_inner(&mut self, domain: &str) -> Result { - let mut ssl = self.connector.configure()?; - (self.config)(&mut ssl)?; - Ok(TlsConnector::new(ssl, domain)) - } } #[cfg(feature = "runtime")] @@ -60,10 +52,11 @@ where type Stream = SslStream; type TlsConnect = TlsConnector; type Error = ErrorStack; - type Future = FutureResult; - fn make_tls_connect(&mut self, domain: &str) -> FutureResult { - future::result(self.make_tls_connect_inner(domain)) + fn make_tls_connect(&mut self, domain: &str) -> Result { + let mut ssl = self.connector.configure()?; + (self.config)(&mut ssl)?; + Ok(TlsConnector::new(ssl, domain)) } } diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index 123a51e84..afc1d012b 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -1,4 +1,4 @@ -use futures::{try_ready, Async, Future, Poll}; +use futures::{Async, Future, Poll}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use crate::proto::{Client, ConnectOnceFuture, Connection}; @@ -9,19 +9,12 @@ pub enum Connect where T: MakeTlsMode, { - #[state_machine_future(start, transitions(MakingTlsMode))] + #[state_machine_future(start, transitions(Connecting))] Start { make_tls_mode: T, config: Result, }, - #[state_machine_future(transitions(Connecting))] - MakingTlsMode { - future: T::Future, - idx: usize, - make_tls_mode: T, - config: Config, - }, - #[state_machine_future(transitions(MakingTlsMode, Finished))] + #[state_machine_future(transitions(Finished))] Connecting { future: ConnectOnceFuture, idx: usize, @@ -57,58 +50,48 @@ where #[cfg(unix)] Host::Unix(_) => "", }; - let future = state.make_tls_mode.make_tls_mode(hostname); + let tls_mode = state + .make_tls_mode + .make_tls_mode(hostname) + .map_err(|e| Error::tls(e.into()))?; - transition!(MakingTlsMode { - future, + transition!(Connecting { + future: ConnectOnceFuture::new(0, tls_mode, config.clone()), idx: 0, make_tls_mode: state.make_tls_mode, config, }) } - fn poll_making_tls_mode<'a>( - state: &'a mut RentToOwn<'a, MakingTlsMode>, - 
) -> Poll, Error> { - let tls_mode = try_ready!(state.future.poll().map_err(|e| Error::tls(e.into()))); - let state = state.take(); - - transition!(Connecting { - future: ConnectOnceFuture::new(state.idx, tls_mode, state.config.clone()), - idx: state.idx, - make_tls_mode: state.make_tls_mode, - config: state.config, - }) - } - fn poll_connecting<'a>( state: &'a mut RentToOwn<'a, Connecting>, ) -> Poll, Error> { - match state.future.poll() { - Ok(Async::Ready(r)) => transition!(Finished(r)), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(e) => { - let mut state = state.take(); - let idx = state.idx + 1; + loop { + match state.future.poll() { + Ok(Async::Ready(r)) => transition!(Finished(r)), + Ok(Async::NotReady) => return Ok(Async::NotReady), + Err(e) => { + let state = &mut **state; + state.idx += 1; - let host = match state.config.0.host.get(idx) { - Some(host) => host, - None => return Err(e), - }; + let host = match state.config.0.host.get(state.idx) { + Some(host) => host, + None => return Err(e), + }; - let hostname = match host { - Host::Tcp(host) => &**host, - #[cfg(unix)] - Host::Unix(_) => "", - }; - let future = state.make_tls_mode.make_tls_mode(hostname); + let hostname = match host { + Host::Tcp(host) => &**host, + #[cfg(unix)] + Host::Unix(_) => "", + }; + let tls_mode = state + .make_tls_mode + .make_tls_mode(hostname) + .map_err(|e| Error::tls(e.into()))?; - transition!(MakingTlsMode { - future, - idx, - make_tls_mode: state.make_tls_mode, - config: state.config, - }) + state.future = + ConnectOnceFuture::new(state.idx, tls_mode, state.config.clone()); + } } } } diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index c6a853b49..a5170dd48 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -30,9 +30,8 @@ pub trait MakeTlsMode { type Stream: AsyncRead + AsyncWrite; type TlsMode: TlsMode; type Error: Into>; - type Future: Future; - fn make_tls_mode(&mut self, domain: &str) -> Self::Future; + fn make_tls_mode(&mut self, domain: &str) -> Result; } pub trait TlsMode { @@ -50,9 +49,8 @@ pub trait MakeTlsConnect { type Stream: AsyncRead + AsyncWrite; type TlsConnect: TlsConnect; type Error: Into>; - type Future: Future; - fn make_tls_connect(&mut self, domain: &str) -> Self::Future; + fn make_tls_connect(&mut self, domain: &str) -> Result; } pub trait TlsConnect { @@ -74,10 +72,9 @@ where type Stream = S; type TlsMode = NoTls; type Error = Void; - type Future = FutureResult; - fn make_tls_mode(&mut self, _: &str) -> FutureResult { - future::ok(NoTls) + fn make_tls_mode(&mut self, _: &str) -> Result { + Ok(NoTls) } } @@ -112,26 +109,9 @@ where type Stream = MaybeTlsStream; type TlsMode = PreferTls; type Error = T::Error; - type Future = MakePreferTlsFuture; - fn make_tls_mode(&mut self, domain: &str) -> MakePreferTlsFuture { - MakePreferTlsFuture(self.0.make_tls_connect(domain)) - } -} - -#[cfg(feature = "runtime")] -pub struct MakePreferTlsFuture(F); - -#[cfg(feature = "runtime")] -impl Future for MakePreferTlsFuture -where - F: Future, -{ - type Item = PreferTls; - type Error = F::Error; - - fn poll(&mut self) -> Poll, F::Error> { - self.0.poll().map(|f| f.map(PreferTls)) + fn make_tls_mode(&mut self, domain: &str) -> Result, T::Error> { + self.0.make_tls_connect(domain).map(PreferTls) } } @@ -282,26 +262,9 @@ where type Stream = T::Stream; type TlsMode = RequireTls; type Error = T::Error; - type Future = MakeRequireTlsFuture; - - fn make_tls_mode(&mut self, domain: &str) -> MakeRequireTlsFuture { - 
MakeRequireTlsFuture(self.0.make_tls_connect(domain)) - } -} - -#[cfg(feature = "runtime")] -pub struct MakeRequireTlsFuture(F); - -#[cfg(feature = "runtime")] -impl Future for MakeRequireTlsFuture -where - F: Future, -{ - type Item = RequireTls; - type Error = F::Error; - fn poll(&mut self) -> Poll, F::Error> { - self.0.poll().map(|f| f.map(RequireTls)) + fn make_tls_mode(&mut self, domain: &str) -> Result, T::Error> { + self.0.make_tls_connect(domain).map(RequireTls) } } From e0d113791cc19aa0595c8e6220a92c50dbad11aa Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 6 Jan 2019 10:33:31 -0800 Subject: [PATCH 100/819] Rename raw cancel query --- tokio-postgres/src/config.rs | 17 +++++++++++++++-- tokio-postgres/src/lib.rs | 13 ++----------- .../proto/{cancel.rs => cancel_query_raw.rs} | 10 +++++----- tokio-postgres/src/proto/mod.rs | 4 ++-- tokio-postgres/tests/test/main.rs | 2 +- 5 files changed, 25 insertions(+), 21 deletions(-) rename tokio-postgres/src/proto/{cancel.rs => cancel_query_raw.rs} (89%) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 38dfb7558..c37776ddf 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -17,10 +17,10 @@ use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "runtime")] use crate::proto::ConnectFuture; -use crate::proto::HandshakeFuture; +use crate::proto::{CancelQueryRawFuture, HandshakeFuture}; +use crate::{CancelData, CancelQueryRaw, Error, Handshake, TlsMode}; #[cfg(feature = "runtime")] use crate::{Connect, MakeTlsMode, Socket}; -use crate::{Error, Handshake, TlsMode}; #[cfg(feature = "runtime")] #[derive(Debug, Copy, Clone, PartialEq)] @@ -277,6 +277,19 @@ impl Config { { Connect(ConnectFuture::new(make_tls_mode, Ok(self.clone()))) } + + pub fn cancel_query_raw( + &self, + stream: S, + tls_mode: T, + cancel_data: CancelData, + ) -> CancelQueryRaw + where + S: AsyncRead + AsyncWrite, + T: TlsMode, + { + CancelQueryRaw(CancelQueryRawFuture::new(stream, tls_mode, cancel_data)) + } } impl FromStr for Config { diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index b42774a64..1ae2e60d0 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -116,7 +116,6 @@ use tokio_io::{AsyncRead, AsyncWrite}; pub use crate::config::*; pub use crate::error::*; -use crate::proto::CancelFuture; pub use crate::row::*; #[cfg(feature = "runtime")] pub use crate::socket::Socket; @@ -152,14 +151,6 @@ where Connect(proto::ConnectFuture::new(tls_mode, config.parse())) } -pub fn cancel_query(stream: S, tls_mode: T, cancel_data: CancelData) -> CancelQuery -where - S: AsyncRead + AsyncWrite, - T: TlsMode, -{ - CancelQuery(CancelFuture::new(stream, tls_mode, cancel_data)) -} - pub struct Client(proto::Client); impl Client { @@ -265,12 +256,12 @@ pub enum AsyncMessage { } #[must_use = "futures do nothing unless polled"] -pub struct CancelQuery(proto::CancelFuture) +pub struct CancelQueryRaw(proto::CancelQueryRawFuture) where S: AsyncRead + AsyncWrite, T: TlsMode; -impl Future for CancelQuery +impl Future for CancelQueryRaw where S: AsyncRead + AsyncWrite, T: TlsMode, diff --git a/tokio-postgres/src/proto/cancel.rs b/tokio-postgres/src/proto/cancel_query_raw.rs similarity index 89% rename from tokio-postgres/src/proto/cancel.rs rename to tokio-postgres/src/proto/cancel_query_raw.rs index dc37de654..0b1c7534c 100644 --- a/tokio-postgres/src/proto/cancel.rs +++ b/tokio-postgres/src/proto/cancel_query_raw.rs @@ -9,7 +9,7 @@ use crate::proto::TlsFuture; use crate::{CancelData, TlsMode}; 
#[derive(StateMachineFuture)] -pub enum Cancel +pub enum CancelQueryRaw where S: AsyncRead + AsyncWrite, T: TlsMode, @@ -31,7 +31,7 @@ where Failed(Error), } -impl PollCancel for Cancel +impl PollCancelQueryRaw for CancelQueryRaw where S: AsyncRead + AsyncWrite, T: TlsMode, @@ -69,12 +69,12 @@ where } } -impl CancelFuture +impl CancelQueryRawFuture where S: AsyncRead + AsyncWrite, T: TlsMode, { - pub fn new(stream: S, tls_mode: T, cancel_data: CancelData) -> CancelFuture { - Cancel::start(TlsFuture::new(stream, tls_mode), cancel_data) + pub fn new(stream: S, tls_mode: T, cancel_data: CancelData) -> CancelQueryRawFuture { + CancelQueryRaw::start(TlsFuture::new(stream, tls_mode), cancel_data) } } diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index ed460aabe..515ebaf42 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -19,7 +19,7 @@ macro_rules! try_ready_closed { } mod bind; -mod cancel; +mod cancel_query_raw; mod client; mod codec; #[cfg(feature = "runtime")] @@ -44,7 +44,7 @@ mod typeinfo_composite; mod typeinfo_enum; pub use crate::proto::bind::BindFuture; -pub use crate::proto::cancel::CancelFuture; +pub use crate::proto::cancel_query_raw::CancelQueryRawFuture; pub use crate::proto::client::Client; pub use crate::proto::codec::PostgresCodec; #[cfg(feature = "runtime")] diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 59f01238f..f21e0d7fa 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -245,7 +245,7 @@ fn cancel_query() { }) .then(|r| { let s = r.unwrap(); - tokio_postgres::cancel_query(s, NoTls, cancel_data) + tokio_postgres::Config::new().cancel_query_raw(s, NoTls, cancel_data) }) .then(|r| { r.unwrap(); From a6535b43103e12bf65ad5960ba7fc99d80bf1118 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 6 Jan 2019 15:20:28 -0800 Subject: [PATCH 101/819] Internal connect refactoring --- tokio-postgres/src/proto/connect_once.rs | 181 ++------------------- tokio-postgres/src/proto/connect_socket.rs | 179 ++++++++++++++++++++ tokio-postgres/src/proto/mod.rs | 4 + 3 files changed, 197 insertions(+), 167 deletions(-) create mode 100644 tokio-postgres/src/proto/connect_socket.rs diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 76ed9c422..2180e14fd 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -1,61 +1,27 @@ #![allow(clippy::large_enum_variant)] use futures::{try_ready, Async, Future, Poll, Stream}; -use futures_cpupool::{CpuFuture, CpuPool}; -use lazy_static::lazy_static; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; -use std::net::{SocketAddr, ToSocketAddrs}; -use std::time::Instant; -use std::vec; -use tokio_tcp::TcpStream; -use tokio_timer::Delay; -#[cfg(unix)] -use tokio_uds::UnixStream; -use crate::proto::{Client, Connection, HandshakeFuture, SimpleQueryStream}; -use crate::{Config, Error, Host, Socket, TargetSessionAttrs, TlsMode}; - -lazy_static! 
{ - static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() - .name_prefix("postgres-dns-") - .pool_size(2) - .create(); -} +use crate::proto::{Client, ConnectSocketFuture, Connection, HandshakeFuture, SimpleQueryStream}; +use crate::{Config, Error, Socket, TargetSessionAttrs, TlsMode}; #[derive(StateMachineFuture)] pub enum ConnectOnce where T: TlsMode, { - #[state_machine_future(start)] - #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))] - #[cfg_attr(not(unix), state_machine_future(transitions(ConnectingTcp)))] + #[state_machine_future(start, transitions(ConnectingSocket))] Start { idx: usize, tls_mode: T, config: Config, }, - #[cfg(unix)] - #[state_machine_future(transitions(Handshaking))] - ConnectingUnix { - future: tokio_uds::ConnectFuture, - timeout: Option, - tls_mode: T, - config: Config, - }, - #[state_machine_future(transitions(ConnectingTcp))] - ResolvingDns { - future: CpuFuture, io::Error>, - timeout: Option, - tls_mode: T, - config: Config, - }, #[state_machine_future(transitions(Handshaking))] - ConnectingTcp { - future: tokio_tcp::ConnectFuture, - addrs: vec::IntoIter, - timeout: Option, + ConnectingSocket { + future: ConnectSocketFuture, + idx: usize, tls_mode: T, config: Config, }, @@ -83,142 +49,23 @@ where fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let state = state.take(); - let port = *state - .config - .0 - .port - .get(state.idx) - .or_else(|| state.config.0.port.get(0)) - .unwrap_or(&5432); - - let timeout = state - .config - .0 - .connect_timeout - .map(|d| Delay::new(Instant::now() + d)); - - match &state.config.0.host[state.idx] { - Host::Tcp(host) => { - let host = host.clone(); - transition!(ResolvingDns { - future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()), - timeout, - tls_mode: state.tls_mode, - config: state.config, - }) - } - #[cfg(unix)] - Host::Unix(host) => { - let path = host.join(format!(".s.PGSQL.{}", port)); - transition!(ConnectingUnix { - future: UnixStream::connect(path), - timeout, - tls_mode: state.tls_mode, - config: state.config, - }) - } - } - } - - #[cfg(unix)] - fn poll_connecting_unix<'a>( - state: &'a mut RentToOwn<'a, ConnectingUnix>, - ) -> Poll, Error> { - if let Some(timeout) = &mut state.timeout { - match timeout.poll() { - Ok(Async::Ready(())) => { - return Err(Error::connect(io::Error::from(io::ErrorKind::TimedOut))) - } - Ok(Async::NotReady) => {} - Err(e) => return Err(Error::connect(io::Error::new(io::ErrorKind::Other, e))), - } - } - - let stream = try_ready!(state.future.poll().map_err(Error::connect)); - let stream = Socket::new_unix(stream); - let state = state.take(); - - transition!(Handshaking { - target_session_attrs: state.config.0.target_session_attrs, - future: HandshakeFuture::new(stream, state.tls_mode, state.config), - }) - } - - fn poll_resolving_dns<'a>( - state: &'a mut RentToOwn<'a, ResolvingDns>, - ) -> Poll, Error> { - if let Some(timeout) = &mut state.timeout { - match timeout.poll() { - Ok(Async::Ready(())) => { - return Err(Error::connect(io::Error::from(io::ErrorKind::TimedOut))) - } - Ok(Async::NotReady) => {} - Err(e) => return Err(Error::connect(io::Error::new(io::ErrorKind::Other, e))), - } - } - - let mut addrs = try_ready!(state.future.poll().map_err(Error::connect)); - let state = state.take(); - - let addr = match addrs.next() { - Some(addr) => addr, - None => { - return Err(Error::connect(io::Error::new( - io::ErrorKind::InvalidData, - "resolved 0 addresses", - ))); - } - }; - - transition!(ConnectingTcp { 
- future: TcpStream::connect(&addr), - addrs, - timeout: state.timeout, + transition!(ConnectingSocket { + future: ConnectSocketFuture::new(state.config.clone(), state.idx), + idx: state.idx, tls_mode: state.tls_mode, config: state.config, }) } - fn poll_connecting_tcp<'a>( - state: &'a mut RentToOwn<'a, ConnectingTcp>, - ) -> Poll, Error> { - if let Some(timeout) = &mut state.timeout { - match timeout.poll() { - Ok(Async::Ready(())) => { - return Err(Error::connect(io::Error::from(io::ErrorKind::TimedOut))) - } - Ok(Async::NotReady) => {} - Err(e) => return Err(Error::connect(io::Error::new(io::ErrorKind::Other, e))), - } - } - - let stream = loop { - match state.future.poll() { - Ok(Async::Ready(stream)) => break stream, - Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(e) => { - let addr = match state.addrs.next() { - Some(addr) => addr, - None => return Err(Error::connect(e)), - }; - state.future = TcpStream::connect(&addr); - } - } - }; + fn poll_connecting_socket<'a>( + state: &'a mut RentToOwn<'a, ConnectingSocket>, + ) -> Poll, Error> { + let socket = try_ready!(state.future.poll()); let state = state.take(); - stream.set_nodelay(true).map_err(Error::connect)?; - if state.config.0.keepalives { - stream - .set_keepalive(Some(state.config.0.keepalives_idle)) - .map_err(Error::connect)?; - } - - let stream = Socket::new_tcp(stream); - transition!(Handshaking { target_session_attrs: state.config.0.target_session_attrs, - future: HandshakeFuture::new(stream, state.tls_mode, state.config), + future: HandshakeFuture::new(socket, state.tls_mode, state.config), }) } diff --git a/tokio-postgres/src/proto/connect_socket.rs b/tokio-postgres/src/proto/connect_socket.rs new file mode 100644 index 000000000..65cbe8955 --- /dev/null +++ b/tokio-postgres/src/proto/connect_socket.rs @@ -0,0 +1,179 @@ +use futures::{try_ready, Async, Future, Poll}; +use futures_cpupool::{CpuFuture, CpuPool}; +use lazy_static::lazy_static; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; +use std::io; +use std::net::{SocketAddr, ToSocketAddrs}; +use std::time::Instant; +use std::vec; +use tokio_tcp::TcpStream; +use tokio_timer::Delay; +#[cfg(unix)] +use tokio_uds::UnixStream; + +use crate::{Config, Error, Host, Socket}; + +lazy_static! 
{ + static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() + .name_prefix("postgres-dns-") + .pool_size(2) + .create(); +} + +#[derive(StateMachineFuture)] +pub enum ConnectSocket { + #[state_machine_future(start)] + #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))] + #[cfg_attr(not(unix), state_machine_future(transitions(ResolvingDns)))] + Start { config: Config, idx: usize }, + #[cfg(unix)] + #[state_machine_future(transitions(Finished))] + ConnectingUnix { + future: tokio_uds::ConnectFuture, + timeout: Option, + }, + #[state_machine_future(transitions(ConnectingTcp))] + ResolvingDns { + future: CpuFuture, io::Error>, + config: Config, + }, + #[state_machine_future(transitions(Finished))] + ConnectingTcp { + future: tokio_tcp::ConnectFuture, + timeout: Option, + addrs: vec::IntoIter, + config: Config, + }, + #[state_machine_future(ready)] + Finished(Socket), + #[state_machine_future(error)] + Failed(Error), +} + +impl PollConnectSocket for ConnectSocket { + fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { + let state = state.take(); + + let port = *state + .config + .0 + .port + .get(state.idx) + .or_else(|| state.config.0.port.get(0)) + .unwrap_or(&5432); + + match &state.config.0.host[state.idx] { + Host::Tcp(host) => transition!(ResolvingDns { + future: DNS_POOL.spawn_fn({ + let host = host.clone(); + move || (&*host, port).to_socket_addrs() + }), + config: state.config, + }), + #[cfg(unix)] + Host::Unix(host) => { + let path = host.join(format!(".s.PGSQL.{}", port)); + let timeout = state + .config + .0 + .connect_timeout + .map(|d| Delay::new(Instant::now() + d)); + transition!(ConnectingUnix { + future: UnixStream::connect(path), + timeout, + }) + } + } + } + + #[cfg(unix)] + fn poll_connecting_unix<'a>( + state: &'a mut RentToOwn<'a, ConnectingUnix>, + ) -> Poll { + if let Some(timeout) = &mut state.timeout { + match timeout.poll() { + Ok(Async::Ready(())) => { + return Err(Error::connect(io::Error::from(io::ErrorKind::TimedOut))); + } + Ok(Async::NotReady) => {} + Err(e) => return Err(Error::connect(io::Error::new(io::ErrorKind::Other, e))), + } + } + let socket = try_ready!(state.future.poll().map_err(Error::connect)); + + transition!(Finished(Socket::new_unix(socket))) + } + + fn poll_resolving_dns<'a>( + state: &'a mut RentToOwn<'a, ResolvingDns>, + ) -> Poll { + let mut addrs = try_ready!(state.future.poll().map_err(Error::connect)); + let state = state.take(); + + let addr = match addrs.next() { + Some(addr) => addr, + None => { + return Err(Error::connect(io::Error::new( + io::ErrorKind::InvalidData, + "resolved 0 addresses", + ))); + } + }; + + let timeout = state + .config + .0 + .connect_timeout + .map(|d| Delay::new(Instant::now() + d)); + + transition!(ConnectingTcp { + future: TcpStream::connect(&addr), + addrs, + timeout: timeout, + config: state.config, + }) + } + + fn poll_connecting_tcp<'a>( + state: &'a mut RentToOwn<'a, ConnectingTcp>, + ) -> Poll { + let stream = loop { + let error = match state.future.poll() { + Ok(Async::Ready(stream)) => break stream, + Ok(Async::NotReady) => match &mut state.timeout { + Some(timeout) => { + try_ready!(timeout + .poll() + .map_err(|e| Error::connect(io::Error::new(io::ErrorKind::Other, e)))); + io::Error::from(io::ErrorKind::TimedOut) + } + None => return Ok(Async::NotReady), + }, + Err(e) => e, + }; + + let addr = state.addrs.next().ok_or_else(|| Error::connect(error))?; + state.future = TcpStream::connect(&addr); + state.timeout = state + .config + .0 + 
.connect_timeout + .map(|d| Delay::new(Instant::now() + d)); + }; + + stream.set_nodelay(true).map_err(Error::connect)?; + if state.config.0.keepalives { + stream + .set_keepalive(Some(state.config.0.keepalives_idle)) + .map_err(Error::connect)?; + } + + transition!(Finished(Socket::new_tcp(stream))); + } +} + +impl ConnectSocketFuture { + pub fn new(config: Config, idx: usize) -> ConnectSocketFuture { + ConnectSocket::start(config, idx) + } +} diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 515ebaf42..ceeffd07b 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -26,6 +26,8 @@ mod codec; mod connect; #[cfg(feature = "runtime")] mod connect_once; +#[cfg(feature = "runtime")] +mod connect_socket; mod connection; mod copy_in; mod copy_out; @@ -51,6 +53,8 @@ pub use crate::proto::codec::PostgresCodec; pub use crate::proto::connect::ConnectFuture; #[cfg(feature = "runtime")] pub use crate::proto::connect_once::ConnectOnceFuture; +#[cfg(feature = "runtime")] +pub use crate::proto::connect_socket::ConnectSocketFuture; pub use crate::proto::connection::Connection; pub use crate::proto::copy_in::CopyInFuture; pub use crate::proto::copy_out::CopyOutStream; From 1f6d9ddc063f3408bb5135bf89c88e3d50f8674a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 6 Jan 2019 18:03:51 -0800 Subject: [PATCH 102/819] Overhaul query cancellation Multi-host support means we can't simply take the old approach - we need to know which of the hosts we actually connected to. It's also nice to move this from the connection to the client since that's what you'd normally have access to. --- tokio-postgres/src/config.rs | 19 +--- tokio-postgres/src/lib.rs | 48 ++++++--- tokio-postgres/src/proto/cancel_query.rs | 105 +++++++++++++++++++ tokio-postgres/src/proto/cancel_query_raw.rs | 20 ++-- tokio-postgres/src/proto/client.rs | 48 ++++++++- tokio-postgres/src/proto/connect_once.rs | 2 +- tokio-postgres/src/proto/connection.rs | 9 +- tokio-postgres/src/proto/handshake.rs | 90 +++++++++++----- tokio-postgres/src/proto/mod.rs | 4 + tokio-postgres/tests/test/main.rs | 5 +- tokio-postgres/tests/test/runtime.rs | 33 +++++- 11 files changed, 305 insertions(+), 78 deletions(-) create mode 100644 tokio-postgres/src/proto/cancel_query.rs diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index c37776ddf..cf796cec5 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -17,10 +17,10 @@ use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "runtime")] use crate::proto::ConnectFuture; -use crate::proto::{CancelQueryRawFuture, HandshakeFuture}; -use crate::{CancelData, CancelQueryRaw, Error, Handshake, TlsMode}; +use crate::proto::HandshakeFuture; #[cfg(feature = "runtime")] use crate::{Connect, MakeTlsMode, Socket}; +use crate::{Error, Handshake, TlsMode}; #[cfg(feature = "runtime")] #[derive(Debug, Copy, Clone, PartialEq)] @@ -267,7 +267,7 @@ impl Config { S: AsyncRead + AsyncWrite, T: TlsMode, { - Handshake(HandshakeFuture::new(stream, tls_mode, self.clone())) + Handshake(HandshakeFuture::new(stream, tls_mode, self.clone(), None)) } #[cfg(feature = "runtime")] @@ -277,19 +277,6 @@ impl Config { { Connect(ConnectFuture::new(make_tls_mode, Ok(self.clone()))) } - - pub fn cancel_query_raw( - &self, - stream: S, - tls_mode: T, - cancel_data: CancelData, - ) -> CancelQueryRaw - where - S: AsyncRead + AsyncWrite, - T: TlsMode, - { - CancelQueryRaw(CancelQueryRawFuture::new(stream, tls_mode, cancel_data)) - } } impl 
FromStr for Config { diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 1ae2e60d0..1a5317f5c 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -206,6 +206,22 @@ impl Client { BatchExecute(self.0.batch_execute(query)) } + #[cfg(feature = "runtime")] + pub fn cancel_query(&mut self, make_tls_mode: T) -> CancelQuery + where + T: MakeTlsMode, + { + CancelQuery(self.0.cancel_query(make_tls_mode)) + } + + pub fn cancel_query_raw(&mut self, stream: S, tls_mode: T) -> CancelQueryRaw + where + S: AsyncRead + AsyncWrite, + T: TlsMode, + { + CancelQueryRaw(self.0.cancel_query_raw(stream, tls_mode)) + } + pub fn is_closed(&self) -> bool { self.0.is_closed() } @@ -222,10 +238,6 @@ impl Connection where S: AsyncRead + AsyncWrite, { - pub fn cancel_data(&self) -> CancelData { - self.0.cancel_data() - } - pub fn parameter(&self, name: &str) -> Option<&str> { self.0.parameter(name) } @@ -274,6 +286,25 @@ where } } +#[cfg(feature = "runtime")] +#[must_use = "futures do nothing unless polled"] +pub struct CancelQuery(proto::CancelQueryFuture) +where + T: MakeTlsMode; + +#[cfg(feature = "runtime")] +impl Future for CancelQuery +where + T: MakeTlsMode, +{ + type Item = (); + type Error = Error; + + fn poll(&mut self) -> Poll<(), Error> { + self.0.poll() + } +} + #[must_use = "futures do nothing unless polled"] pub struct Handshake(proto::HandshakeFuture) where @@ -478,15 +509,6 @@ impl Future for BatchExecute { } } -/// Contains information necessary to cancel queries for a session. -#[derive(Copy, Clone, Debug)] -pub struct CancelData { - /// The process ID of the session. - pub process_id: i32, - /// The secret key for the session. - pub secret_key: i32, -} - /// An asynchronous notification. #[derive(Clone, Debug)] pub struct Notification { diff --git a/tokio-postgres/src/proto/cancel_query.rs b/tokio-postgres/src/proto/cancel_query.rs new file mode 100644 index 000000000..61484803c --- /dev/null +++ b/tokio-postgres/src/proto/cancel_query.rs @@ -0,0 +1,105 @@ +use futures::{try_ready, Future, Poll}; +use state_machine_future::{transition, RentToOwn, StateMachineFuture}; +use std::io; + +use crate::proto::{CancelQueryRawFuture, ConnectSocketFuture}; +use crate::{Config, Error, Host, MakeTlsMode, Socket}; + +#[derive(StateMachineFuture)] +pub enum CancelQuery +where + T: MakeTlsMode, +{ + #[state_machine_future(start, transitions(ConnectingSocket))] + Start { + make_tls_mode: T, + idx: Option, + config: Config, + process_id: i32, + secret_key: i32, + }, + #[state_machine_future(transitions(Canceling))] + ConnectingSocket { + future: ConnectSocketFuture, + tls_mode: T::TlsMode, + process_id: i32, + secret_key: i32, + }, + #[state_machine_future(transitions(Finished))] + Canceling { + future: CancelQueryRawFuture, + }, + #[state_machine_future(ready)] + Finished(()), + #[state_machine_future(error)] + Failed(Error), +} + +impl PollCancelQuery for CancelQuery +where + T: MakeTlsMode, +{ + fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { + let mut state = state.take(); + + let idx = state.idx.ok_or_else(|| { + Error::connect(io::Error::new(io::ErrorKind::InvalidInput, "unknown host")) + })?; + + let hostname = match &state.config.0.host[idx] { + Host::Tcp(host) => &**host, + // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter + #[cfg(unix)] + Host::Unix(_) => "", + }; + let tls_mode = state + .make_tls_mode + .make_tls_mode(hostname) + .map_err(|e| Error::tls(e.into()))?; + + transition!(ConnectingSocket { + 
future: ConnectSocketFuture::new(state.config, idx), + tls_mode, + process_id: state.process_id, + secret_key: state.secret_key, + }) + } + + fn poll_connecting_socket<'a>( + state: &'a mut RentToOwn<'a, ConnectingSocket>, + ) -> Poll, Error> { + let socket = try_ready!(state.future.poll()); + let state = state.take(); + + transition!(Canceling { + future: CancelQueryRawFuture::new( + socket, + state.tls_mode, + state.process_id, + state.secret_key + ), + }) + } + + fn poll_canceling<'a>( + state: &'a mut RentToOwn<'a, Canceling>, + ) -> Poll { + try_ready!(state.future.poll()); + transition!(Finished(())) + } +} + +impl CancelQueryFuture +where + T: MakeTlsMode, +{ + pub fn new( + make_tls_mode: T, + idx: Option, + config: Config, + process_id: i32, + secret_key: i32, + ) -> CancelQueryFuture { + CancelQuery::start(make_tls_mode, idx, config, process_id, secret_key) + } +} diff --git a/tokio-postgres/src/proto/cancel_query_raw.rs b/tokio-postgres/src/proto/cancel_query_raw.rs index 0b1c7534c..ae2aee451 100644 --- a/tokio-postgres/src/proto/cancel_query_raw.rs +++ b/tokio-postgres/src/proto/cancel_query_raw.rs @@ -6,7 +6,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; use crate::error::Error; use crate::proto::TlsFuture; -use crate::{CancelData, TlsMode}; +use crate::TlsMode; #[derive(StateMachineFuture)] pub enum CancelQueryRaw @@ -17,7 +17,8 @@ where #[state_machine_future(start, transitions(SendingCancel))] Start { future: TlsFuture, - cancel_data: CancelData, + process_id: i32, + secret_key: i32, }, #[state_machine_future(transitions(FlushingCancel))] SendingCancel { @@ -40,11 +41,7 @@ where let (stream, _) = try_ready!(state.future.poll()); let mut buf = vec![]; - frontend::cancel_request( - state.cancel_data.process_id, - state.cancel_data.secret_key, - &mut buf, - ); + frontend::cancel_request(state.process_id, state.secret_key, &mut buf); transition!(SendingCancel { future: io::write_all(stream, buf), @@ -74,7 +71,12 @@ where S: AsyncRead + AsyncWrite, T: TlsMode, { - pub fn new(stream: S, tls_mode: T, cancel_data: CancelData) -> CancelQueryRawFuture { - CancelQueryRaw::start(TlsFuture::new(stream, tls_mode), cancel_data) + pub fn new( + stream: S, + tls_mode: T, + process_id: i32, + secret_key: i32, + ) -> CancelQueryRawFuture { + CancelQueryRaw::start(TlsFuture::new(stream, tls_mode), process_id, secret_key) } } diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index f41b5bfe7..feb06bd26 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -8,6 +8,7 @@ use postgres_protocol::message::frontend; use std::collections::HashMap; use std::error::Error as StdError; use std::sync::{Arc, Weak}; +use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::bind::BindFuture; use crate::proto::connection::{Request, RequestMessages}; @@ -20,8 +21,13 @@ use crate::proto::prepare::PrepareFuture; use crate::proto::query::QueryStream; use crate::proto::simple_query::SimpleQueryStream; use crate::proto::statement::Statement; +#[cfg(feature = "runtime")] +use crate::proto::CancelQueryFuture; +use crate::proto::CancelQueryRawFuture; use crate::types::{IsNull, Oid, ToSql, Type}; -use crate::Error; +use crate::{Config, Error, TlsMode}; +#[cfg(feature = "runtime")] +use crate::{MakeTlsMode, Socket}; pub struct PendingRequest(Result<(RequestMessages, IdleGuard), Error>); @@ -44,13 +50,25 @@ struct Inner { state: Mutex, idle: IdleState, sender: mpsc::UnboundedSender, + process_id: i32, + secret_key: i32, + #[cfg_attr(not(feature = 
"runtime"), allow(dead_code))] + config: Config, + #[cfg_attr(not(feature = "runtime"), allow(dead_code))] + idx: Option, } #[derive(Clone)] pub struct Client(Arc); impl Client { - pub fn new(sender: mpsc::UnboundedSender) -> Client { + pub fn new( + sender: mpsc::UnboundedSender, + process_id: i32, + secret_key: i32, + config: Config, + idx: Option, + ) -> Client { Client(Arc::new(Inner { state: Mutex::new(State { types: HashMap::new(), @@ -60,6 +78,10 @@ impl Client { }), idle: IdleState::new(), sender, + process_id, + secret_key, + config, + idx, })) } @@ -222,6 +244,28 @@ impl Client { self.close(b'P', name) } + #[cfg(feature = "runtime")] + pub fn cancel_query(&self, make_tls_mode: T) -> CancelQueryFuture + where + T: MakeTlsMode, + { + CancelQueryFuture::new( + make_tls_mode, + self.0.idx, + self.0.config.clone(), + self.0.process_id, + self.0.secret_key, + ) + } + + pub fn cancel_query_raw(&self, stream: S, tls_mode: T) -> CancelQueryRawFuture + where + S: AsyncRead + AsyncWrite, + T: TlsMode, + { + CancelQueryRawFuture::new(stream, tls_mode, self.0.process_id, self.0.secret_key) + } + fn close(&self, ty: u8, name: &str) { let mut buf = vec![]; frontend::close(ty, name, &mut buf).expect("statement name not valid"); diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 2180e14fd..c784e2d72 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -65,7 +65,7 @@ where transition!(Handshaking { target_session_attrs: state.config.0.target_session_attrs, - future: HandshakeFuture::new(socket, state.tls_mode, state.config), + future: HandshakeFuture::new(socket, state.tls_mode, state.config, Some(state.idx)), }) } diff --git a/tokio-postgres/src/proto/connection.rs b/tokio-postgres/src/proto/connection.rs index e4c80fa17..dd9f30fe4 100644 --- a/tokio-postgres/src/proto/connection.rs +++ b/tokio-postgres/src/proto/connection.rs @@ -11,7 +11,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::codec::PostgresCodec; use crate::proto::copy_in::CopyInReceiver; use crate::proto::idle::IdleGuard; -use crate::{AsyncMessage, CancelData, Notification}; +use crate::{AsyncMessage, Notification}; use crate::{DbError, Error}; pub enum RequestMessages { @@ -42,7 +42,6 @@ enum State { pub struct Connection { stream: Framed, - cancel_data: CancelData, parameters: HashMap, receiver: mpsc::UnboundedReceiver, pending_request: Option, @@ -57,13 +56,11 @@ where { pub fn new( stream: Framed, - cancel_data: CancelData, parameters: HashMap, receiver: mpsc::UnboundedReceiver, ) -> Connection { Connection { stream, - cancel_data, parameters, receiver, pending_request: None, @@ -73,10 +70,6 @@ where } } - pub fn cancel_data(&self) -> CancelData { - self.cancel_data - } - pub fn parameter(&self, name: &str) -> Option<&str> { self.parameters.get(name).map(|s| &**s) } diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/handshake.rs index 6b27f0eae..1d245ae1e 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/handshake.rs @@ -8,12 +8,11 @@ use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::collections::HashMap; -use std::io; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::{Client, Connection, PostgresCodec, TlsFuture}; -use crate::{CancelData, ChannelBinding, Config, Error, TlsMode}; +use 
crate::{ChannelBinding, Config, Error, TlsMode}; #[derive(StateMachineFuture)] pub enum Handshake @@ -25,42 +24,56 @@ where Start { future: TlsFuture, config: Config, + idx: Option, }, #[state_machine_future(transitions(ReadingAuth))] SendingStartup { future: sink::Send>, config: Config, + idx: Option, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingInfo, SendingPassword, SendingSasl))] ReadingAuth { stream: Framed, config: Config, + idx: Option, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingAuthCompletion))] SendingPassword { future: sink::Send>, + config: Config, + idx: Option, }, #[state_machine_future(transitions(ReadingSasl))] SendingSasl { future: sink::Send>, scram: ScramSha256, + config: Config, + idx: Option, }, #[state_machine_future(transitions(SendingSasl, ReadingAuthCompletion))] ReadingSasl { stream: Framed, scram: ScramSha256, + config: Config, + idx: Option, }, #[state_machine_future(transitions(ReadingInfo))] ReadingAuthCompletion { stream: Framed, + config: Config, + idx: Option, }, #[state_machine_future(transitions(Finished))] ReadingInfo { stream: Framed, - cancel_data: Option, + process_id: i32, + secret_key: i32, parameters: HashMap, + config: Config, + idx: Option, }, #[state_machine_future(ready)] Finished((Client, Connection)), @@ -99,6 +112,7 @@ where transition!(SendingStartup { future: stream.send(buf), config: state.config, + idx: state.idx, channel_binding, }) } @@ -111,6 +125,7 @@ where transition!(ReadingAuth { stream, config: state.config, + idx: state.idx, channel_binding: state.channel_binding, }) } @@ -124,8 +139,11 @@ where match message { Some(Message::AuthenticationOk) => transition!(ReadingInfo { stream: state.stream, - cancel_data: None, + process_id: 0, + secret_key: 0, parameters: HashMap::new(), + config: state.config, + idx: state.idx, }), Some(Message::AuthenticationCleartextPassword) => { let pass = state @@ -137,7 +155,9 @@ where let mut buf = vec![]; frontend::password_message(pass, &mut buf).map_err(Error::encode)?; transition!(SendingPassword { - future: state.stream.send(buf) + future: state.stream.send(buf), + config: state.config, + idx: state.idx, }) } Some(Message::AuthenticationMd5Password(body)) => { @@ -157,7 +177,9 @@ where let mut buf = vec![]; frontend::password_message(output.as_bytes(), &mut buf).map_err(Error::encode)?; transition!(SendingPassword { - future: state.stream.send(buf) + future: state.stream.send(buf), + config: state.config, + idx: state.idx, }) } Some(Message::AuthenticationSasl(body)) => { @@ -214,6 +236,8 @@ where transition!(SendingSasl { future: state.stream.send(buf), scram, + config: state.config, + idx: state.idx, }) } Some(Message::AuthenticationKerberosV5) @@ -232,7 +256,12 @@ where state: &'a mut RentToOwn<'a, SendingPassword>, ) -> Poll, Error> { let stream = try_ready!(state.future.poll().map_err(Error::io)); - transition!(ReadingAuthCompletion { stream }) + let state = state.take(); + transition!(ReadingAuthCompletion { + stream, + config: state.config, + idx: state.idx, + }) } fn poll_sending_sasl<'a>( @@ -243,6 +272,8 @@ where transition!(ReadingSasl { stream, scram: state.scram, + config: state.config, + idx: state.idx, }) } @@ -263,6 +294,8 @@ where transition!(SendingSasl { future: state.stream.send(buf), scram: state.scram, + config: state.config, + idx: state.idx, }) } Some(Message::AuthenticationSaslFinal(body)) => { @@ -271,7 +304,9 @@ where .finish(body.data()) .map_err(|e| Error::authentication(Box::new(e)))?; 
transition!(ReadingAuthCompletion { - stream: state.stream + stream: state.stream, + config: state.config, + idx: state.idx, }) } Some(Message::ErrorResponse(body)) => Err(Error::db(body)), @@ -289,8 +324,11 @@ where match message { Some(Message::AuthenticationOk) => transition!(ReadingInfo { stream: state.stream, - cancel_data: None, - parameters: HashMap::new() + process_id: 0, + secret_key: 0, + parameters: HashMap::new(), + config: state.config, + idx: state.idx, }), Some(Message::ErrorResponse(body)) => Err(Error::db(body)), Some(_) => Err(Error::unexpected_message()), @@ -305,10 +343,8 @@ where let message = try_ready!(state.stream.poll().map_err(Error::io)); match message { Some(Message::BackendKeyData(body)) => { - state.cancel_data = Some(CancelData { - process_id: body.process_id(), - secret_key: body.secret_key(), - }); + state.process_id = body.process_id(); + state.secret_key = body.secret_key(); } Some(Message::ParameterStatus(body)) => { state.parameters.insert( @@ -318,16 +354,15 @@ where } Some(Message::ReadyForQuery(_)) => { let state = state.take(); - let cancel_data = state.cancel_data.ok_or_else(|| { - Error::parse(io::Error::new( - io::ErrorKind::InvalidData, - "BackendKeyData message missing", - )) - })?; let (sender, receiver) = mpsc::unbounded(); - let client = Client::new(sender); - let connection = - Connection::new(state.stream, cancel_data, state.parameters, receiver); + let client = Client::new( + sender, + state.process_id, + state.secret_key, + state.config, + state.idx, + ); + let connection = Connection::new(state.stream, state.parameters, receiver); transition!(Finished((client, connection))) } Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), @@ -344,7 +379,12 @@ where S: AsyncRead + AsyncWrite, T: TlsMode, { - pub fn new(stream: S, tls_mode: T, config: Config) -> HandshakeFuture { - Handshake::start(TlsFuture::new(stream, tls_mode), config) + pub fn new( + stream: S, + tls_mode: T, + config: Config, + idx: Option, + ) -> HandshakeFuture { + Handshake::start(TlsFuture::new(stream, tls_mode), config, idx) } } diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index ceeffd07b..aff4a0a94 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -19,6 +19,8 @@ macro_rules! 
try_ready_closed { } mod bind; +#[cfg(feature = "runtime")] +mod cancel_query; mod cancel_query_raw; mod client; mod codec; @@ -46,6 +48,8 @@ mod typeinfo_composite; mod typeinfo_enum; pub use crate::proto::bind::BindFuture; +#[cfg(feature = "runtime")] +pub use crate::proto::cancel_query::CancelQueryFuture; pub use crate::proto::cancel_query_raw::CancelQueryRawFuture; pub use crate::proto::client::Client; pub use crate::proto::codec::PostgresCodec; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index f21e0d7fa..e68f697c2 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -222,12 +222,11 @@ fn query_portal() { } #[test] -fn cancel_query() { +fn cancel_query_raw() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let cancel_data = connection.cancel_data(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); @@ -245,7 +244,7 @@ fn cancel_query() { }) .then(|r| { let s = r.unwrap(); - tokio_postgres::Config::new().cancel_query_raw(s, NoTls, cancel_data) + client.cancel_query_raw(s, NoTls) }) .then(|r| { r.unwrap(); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index bdcd98d6c..691a51611 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,6 +1,8 @@ use futures::Future; +use std::time::{Duration, Instant}; use tokio::runtime::current_thread::Runtime; -use tokio_postgres::NoTls; +use tokio::timer::Delay; +use tokio_postgres::{NoTls, SqlState}; fn smoke_test(s: &str) { let mut runtime = Runtime::new().unwrap(); @@ -67,3 +69,32 @@ fn target_session_attrs_err() { ); runtime.block_on(f).err().unwrap(); } + +#[test] +fn cancel_query() { + let mut runtime = Runtime::new().unwrap(); + + let connect = tokio_postgres::connect("host=localhost port=5433 user=postgres", NoTls); + let (mut client, connection) = runtime.block_on(connect).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let sleep = client + .batch_execute("SELECT pg_sleep(100)") + .then(|r| match r { + Ok(_) => panic!("unexpected success"), + Err(ref e) if e.code() == Some(&SqlState::QUERY_CANCELED) => Ok::<(), ()>(()), + Err(e) => panic!("unexpected error {}", e), + }); + let cancel = Delay::new(Instant::now() + Duration::from_millis(100)) + .then(|r| { + r.unwrap(); + client.cancel_query(NoTls) + }) + .then(|r| { + r.unwrap(); + Ok::<(), ()>(()) + }); + + let ((), ()) = runtime.block_on(sleep.join(cancel)).unwrap(); +} From e372cdc6f99381d92aae15608d9f6a4a83794aa6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 7 Jan 2019 20:43:52 -0800 Subject: [PATCH 103/819] Docs --- tokio-postgres/src/config.rs | 74 +++++++++++++ tokio-postgres/src/lib.rs | 160 +++++++++++++++++++++++------ tokio-postgres/src/proto/client.rs | 2 + 3 files changed, 203 insertions(+), 33 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index cf796cec5..fbd8bf3e8 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -22,10 +22,13 @@ use crate::proto::HandshakeFuture; use crate::{Connect, MakeTlsMode, Socket}; use crate::{Error, Handshake, TlsMode}; +/// Properties required of a database. #[cfg(feature = "runtime")] #[derive(Debug, Copy, Clone, PartialEq)] pub enum TargetSessionAttrs { + /// No special permissions are required. 
Any, + /// The database must be writable. ReadWrite, #[doc(hidden)] __NonExhaustive, @@ -60,6 +63,77 @@ pub(crate) struct Inner { pub(crate) target_session_attrs: TargetSessionAttrs, } +/// Connection configuration. +/// +/// Configuration can be parsed from libpq-style connection strings. These strings come in two formats: +/// +/// # Key-Value +/// +/// This format consists of space-separated key-value pairs. Values which are either the empty string or contain +/// whitespace should be wrapped in `'`. `'` and `\` characters should be backslash-escaped. +/// +/// ## Keys +/// +/// * `user` - The username to authenticate with. Required. +/// * `password` - The password to authenticate with. +/// * `dbname` - The name of the database to connect to. Defaults to the username. +/// * `options` - Command line options used to configure the server. +/// * `application_name` - Sets the `application_name` parameter on the server. +/// * `host` - The host to connect to. On Unix platforms, if the host starts with a `/` character it is treated as the +/// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts +/// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting +/// with the `connect` method. +/// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be +/// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if +/// omitted or the empty string. +/// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. Note that hostnames +/// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. +/// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. +/// This option is ignored when connecting with Unix sockets. Defaults to on. +/// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. +/// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. +/// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that +/// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server +/// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. +/// +/// ## Examples +/// +/// ```not_rust +/// host=localhost user=postgres connect_timeout=10 keepalives=0 +/// ``` +/// +/// ```not_rust +/// host=/var/lib/postgresql,localhost port=1234 user=postgres password='password with spaces' +/// ``` +/// +/// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write +/// ``` +/// +/// # Url +/// +/// This format resembles a URL with a scheme of either `postgres://` or `postgresql://`. All components are optional, +/// and the format accept query parameters for all of the key-value pairs described in the section above. Multiple +/// host/port pairs can be comma-separated. Unix socket paths in the host section of the URL should be percent-encoded, +/// as the path component of the URL specifies the database name. 
+/// +/// ## Examples +/// +/// ```not_rust +/// postgresql://user@localhost +/// ``` +/// +/// ```not_rust +/// postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10 +/// ``` +/// +/// ```not_rust +/// postgresql://user@host1:1234,host2host3:5678?target_session_attrs=read-write +/// ``` +/// +/// ```not_rust +/// postgresql:///mydb?user=user&host=/var/lib/postgresql +/// ``` #[derive(Debug, Clone, PartialEq)] pub struct Config(pub(crate) Arc); diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 1a5317f5c..e27ca1d82 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -54,7 +54,7 @@ //! The client supports *pipelined* requests. Pipelining can improve performance in use cases in which multiple, //! independent queries need to be executed. In a traditional workflow, each query is sent to the server after the //! previous query completes. In contrast, pipelining allows the client to send all of the queries to the server up -//! front, eliminating time spent on both sides waiting for the other to finish sending data: +//! front, minimizing time spent by one side waiting for the other to finish sending data: //! //! ```not_rust //! Sequential Pipelined @@ -75,31 +75,7 @@ //! the connection to work concurrently when possible. //! //! Pipelining happens automatically when futures are polled concurrently (for example, by using the futures `join` -//! combinator). Say we want to prepare 2 statements: -//! -//! ```no_run -//! use futures::Future; -//! use tokio_postgres::{Client, Error, Statement}; -//! -//! fn prepare_sequential( -//! client: &mut Client, -//! ) -> impl Future -//! { -//! client.prepare("SELECT * FROM foo") -//! .and_then({ -//! let f = client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)"); -//! |s1| f.map(|s2| (s1, s2)) -//! }) -//! } -//! -//! fn prepare_pipelined( -//! client: &mut Client, -//! ) -> impl Future -//! { -//! client.prepare("SELECT * FROM foo") -//! .join(client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)")) -//! } -//! ``` +//! combinator). Say we want to prepare 2 statements. //! //! # Runtime //! @@ -143,6 +119,13 @@ fn next_portal() -> String { format!("p{}", ID.fetch_add(1, Ordering::SeqCst)) } +/// A convenience function which parses a connection string and connects to the database. +/// +/// See the documentation for [`Config`] for details on the connection string format. +/// +/// Requires the `runtime` Cargo feature (enabled by default). +/// +/// [`Config`]: ./Config.t.html #[cfg(feature = "runtime")] pub fn connect(config: &str, tls_mode: T) -> Connect where @@ -151,33 +134,73 @@ where Connect(proto::ConnectFuture::new(tls_mode, config.parse())) } +/// An asynchronous PostgreSQL client. +/// +/// The client is one half of what is returned when a connection is established. Users interact with the database +/// through this client object. pub struct Client(proto::Client); impl Client { + /// Creates a new prepared statement. + /// + /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), + /// which are set when executed. Prepared statements can only be used with the connection that created them. pub fn prepare(&mut self, query: &str) -> Prepare { self.prepare_typed(query, &[]) } + /// Like `prepare`, but allows the types of query parameters to be explicitly specified. + /// + /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be + /// inferred. 
For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`. pub fn prepare_typed(&mut self, query: &str, param_types: &[Type]) -> Prepare { Prepare(self.0.prepare(next_statement(), query, param_types)) } + /// Executes a statement, returning the number of rows modified. + /// + /// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Execute { Execute(self.0.execute(&statement.0, params)) } + /// Executes a statement, returning a stream of the resulting rows. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. pub fn query(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Query { Query(self.0.query(&statement.0, params)) } + /// Binds a statement to a set of parameters, creating a `Portal` which can be incrementally queried. + /// + /// Portals only last for the duration of the transaction in which they are created - in particular, a portal + /// created outside of a transaction is immediately destroyed. Portals can only be used on the connection that + /// created them. + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. pub fn bind(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Bind { Bind(self.0.bind(&statement.0, next_portal(), params)) } + /// Continues execution of a portal, returning a stream of the resulting rows. + /// + /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to + /// query_portal. If the requested number is negative or 0, all rows will be returned. pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> QueryPortal { QueryPortal(self.0.query_portal(&portal.0, max_rows)) } + /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. + /// + /// The data in the provided stream is passed along to the server verbatim; it is the caller's responsibility to + /// ensure it uses the proper format. pub fn copy_in( &mut self, statement: &Statement, @@ -194,18 +217,36 @@ impl Client { CopyIn(self.0.copy_in(&statement.0, params, stream)) } + /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. pub fn copy_out(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> CopyOut { CopyOut(self.0.copy_out(&statement.0, params)) } - pub fn transaction(&mut self) -> TransactionBuilder { - TransactionBuilder(self.0.clone()) - } - + /// Executes a sequence of SQL statements. + /// + /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that + /// point. This is intended for the execution of batches of non-dynamic statements, for example, the creation of + /// a schema for a fresh database. + /// + /// # Warning + /// + /// Prepared statements should be use for any query which contains user-specified data, as they provided the + /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass + /// them to this method! pub fn batch_execute(&mut self, query: &str) -> BatchExecute { BatchExecute(self.0.batch_execute(query)) } + pub fn transaction(&mut self) -> TransactionBuilder { + TransactionBuilder(self.0.clone()) + } + + /// Attempts to cancel an in-progress query. 
+ /// + /// The server provides no information about whether a cancellation attempt was successful or not. An error will + /// only be returned if the client was unable to connect to the database. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn cancel_query(&mut self, make_tls_mode: T) -> CancelQuery where @@ -214,6 +255,8 @@ impl Client { CancelQuery(self.0.cancel_query(make_tls_mode)) } + /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new + /// connection itself. pub fn cancel_query_raw(&mut self, stream: S, tls_mode: T) -> CancelQueryRaw where S: AsyncRead + AsyncWrite, @@ -222,15 +265,31 @@ impl Client { CancelQueryRaw(self.0.cancel_query_raw(stream, tls_mode)) } + /// Determines if the connection to the server has already closed. + /// + /// In that case, all future queries will fail. pub fn is_closed(&self) -> bool { self.0.is_closed() } + /// Polls the client to check if it is idle. + /// + /// A connection is idle if there are no outstanding requests, whether they have begun being polled or not. For + /// example, this can be used by a connection pool to ensure that all work done by one checkout is done before + /// making the client available for a new request. Otherwise, any non-completed work from the first request could + /// interleave with the second. pub fn poll_idle(&mut self) -> Poll<(), Error> { self.0.poll_idle() } } +/// A connection to a PostgreSQL database. +/// +/// This is one half of what is returned when a new connection is established. It performs the actual IO with the +/// server, and should generally be spawned off onto an executor to run in the background. +/// +/// `Connection` implements `Future`, and only resolves when the connection is closed, either because a fatal error has +/// occurred, or because its associated `Client` has dropped and all outstanding work has completed. #[must_use = "futures do nothing unless polled"] pub struct Connection(proto::Connection); @@ -238,10 +297,15 @@ impl Connection where S: AsyncRead + AsyncWrite, { + /// Returns the value of a runtime parameter for this connection. pub fn parameter(&self, name: &str) -> Option<&str> { self.0.parameter(name) } + /// Polls for asynchronous messages from the server. + /// + /// The server can send notices as well as notifications asynchronously to the client. Applications which wish to + /// examine those messages should use this method to drive the connection rather than its `Future` implementation. pub fn poll_message(&mut self) -> Poll, Error> { self.0.poll_message() } @@ -259,9 +323,16 @@ where } } +/// An asynchronous message from the server. #[allow(clippy::large_enum_variant)] pub enum AsyncMessage { + /// A notice. + /// + /// Notices use the same format as errors, but aren't "errors" per-se. Notice(DbError), + /// A notification. + /// + /// Connections can subscribe to notifications with the `LISTEN` command. Notification(Notification), #[doc(hidden)] __NonExhaustive, @@ -361,14 +432,19 @@ impl Future for Prepare { } } +/// A prepared statement. +/// +/// Prepared statements can only be used with the connection that created them. #[derive(Clone)] pub struct Statement(proto::Statement); impl Statement { + /// Returns the expected types of the statement's parameters. pub fn params(&self) -> &[Type] { self.0.params() } + /// Returns information about the columns returned when the statement is queried. 
pub fn columns(&self) -> &[Column] { self.0.columns() } @@ -426,6 +502,10 @@ impl Stream for QueryPortal { } } +/// A portal. +/// +/// Portals can only be used with the connection that created them, and only exist for the duration of the transaction +/// in which they were created. pub struct Portal(proto::Portal); #[must_use = "futures do nothing unless polled"] @@ -512,10 +592,24 @@ impl Future for BatchExecute { /// An asynchronous notification. #[derive(Clone, Debug)] pub struct Notification { + process_id: i32, + channel: String, + payload: String, +} + +impl Notification { /// The process ID of the notifying backend process. - pub process_id: i32, + pub fn process_id(&self) -> i32 { + self.process_id + } + /// The name of the channel that the notify has been raised on. - pub channel: String, + pub fn channel(&self) -> &str { + &self.channel + } + /// The "payload" string passed from the notifying process. - pub payload: String, + pub fn payload(&self) -> &str { + &self.payload + } } diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index feb06bd26..f0f6603f5 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -284,6 +284,8 @@ impl Client { name: &str, params: &[&dyn ToSql], ) -> Result, Error> { + assert_eq!(statement.params().len(), params.len()); + let mut buf = vec![]; let r = frontend::bind( name, From 03d1a5aed3dc9da2260b532fee4c4c3931ba2dfe Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 7 Jan 2019 21:10:15 -0800 Subject: [PATCH 104/819] Fix build --- tokio-postgres/src/config.rs | 28 ++++++++++++++++++++++++++++ tokio-postgres/tests/test/main.rs | 10 +++++----- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index fbd8bf3e8..d01c118dc 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -144,6 +144,7 @@ impl Default for Config { } impl Config { + /// Creates a new configuration. pub fn new() -> Config { Config(Arc::new(Inner { user: None, @@ -166,11 +167,15 @@ impl Config { })) } + /// Sets the user to authenticate with. + /// + /// Required. pub fn user(&mut self, user: &str) -> &mut Config { Arc::make_mut(&mut self.0).user = Some(user.to_string()); self } + /// Sets the password to authenticate with. pub fn password(&mut self, password: T) -> &mut Config where T: AsRef<[u8]>, @@ -179,21 +184,32 @@ impl Config { self } + /// Sets the name of the database to connect to. + /// + /// Defaults to the user. pub fn dbname(&mut self, dbname: &str) -> &mut Config { Arc::make_mut(&mut self.0).dbname = Some(dbname.to_string()); self } + /// Sets command line options used to configure the server. pub fn options(&mut self, options: &str) -> &mut Config { Arc::make_mut(&mut self.0).options = Some(options.to_string()); self } + /// Sets the value of the `application_name` runtime parameter. pub fn application_name(&mut self, application_name: &str) -> &mut Config { Arc::make_mut(&mut self.0).application_name = Some(application_name.to_string()); self } + /// Adds a host to the configuration. + /// + /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix + /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. + /// + /// Requires the `runtime` Cargo feature (enabled by default). 
#[cfg(feature = "runtime")] pub fn host(&mut self, host: &str) -> &mut Config { #[cfg(unix)] @@ -209,6 +225,11 @@ impl Config { self } + /// Adds a Unix socket host to the configuration. + /// + /// Unlike `host`, this method allows non-UTF8 paths. + /// + /// Requires the `runtime` Cargo feature (enabled by default) and a Unix target. #[cfg(all(feature = "runtime", unix))] pub fn host_path(&mut self, host: T) -> &mut Config where @@ -220,6 +241,13 @@ impl Config { self } + /// Adds a port to the configuration. + /// + /// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which + /// case the default of 5432 is used, a single port, in which it is used for all hosts, or the same number of ports + /// as hosts. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn port(&mut self, port: u16) -> &mut Config { Arc::make_mut(&mut self.0).port.push(port); diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index e68f697c2..fc526510f 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -426,7 +426,7 @@ fn notifications() { while let Some(message) = try_ready!(connection.poll_message().map_err(|e| panic!("{}", e))) { if let AsyncMessage::Notification(notification) = message { - debug!("received {}", notification.payload); + debug!("received {}", notification.payload()); tx.unbounded_send(notification).unwrap(); } } @@ -452,10 +452,10 @@ fn notifications() { let notifications = rx.collect().wait().unwrap(); assert_eq!(notifications.len(), 2); - assert_eq!(notifications[0].channel, "test_notifications"); - assert_eq!(notifications[0].payload, "hello"); - assert_eq!(notifications[1].channel, "test_notifications"); - assert_eq!(notifications[1].payload, "world"); + assert_eq!(notifications[0].channel(), "test_notifications"); + assert_eq!(notifications[0].payload(), "hello"); + assert_eq!(notifications[1].channel(), "test_notifications"); + assert_eq!(notifications[1].payload(), "world"); } #[test] From 364a19b0b36335499da6a07a6ff86560f6c93975 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 7 Jan 2019 21:19:09 -0800 Subject: [PATCH 105/819] Fix test --- tokio-postgres/src/proto/client.rs | 7 ++++++- tokio-postgres/tests/test/types/mod.rs | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index f0f6603f5..6708e528f 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -284,7 +284,12 @@ impl Client { name: &str, params: &[&dyn ToSql], ) -> Result, Error> { - assert_eq!(statement.params().len(), params.len()); + assert!( + statement.params().len() == params.len(), + "expected {} parameters but got {}", + statement.params().len(), + params.len() + ); let mut buf = vec![]; let r = frontend::bind( diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 2ee54167e..ef9c7588e 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -522,7 +522,7 @@ fn domain() { let prepare = client.prepare("SELECT id FROM pg_temp.foo"); let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[&id]).collect(); + let query = client.query(&stmt, &[]).collect(); let rows = runtime.block_on(query).unwrap(); assert_eq!(id, rows[0].get(0)); } From 3a01c8c3a32474af069e5c5d0aca179fdf38c470 Mon Sep 17 00:00:00 2001 From: 
Steven Fackler Date: Mon, 7 Jan 2019 21:27:52 -0800 Subject: [PATCH 106/819] Finish config documentation --- tokio-postgres/src/config.rs | 44 +++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index d01c118dc..57553a71d 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -254,24 +254,46 @@ impl Config { self } + /// Sets the timeout applied to socket-level connection attempts. + /// + /// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each + /// host separately. Defaults to no limit. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { Arc::make_mut(&mut self.0).connect_timeout = Some(connect_timeout); self } + /// Controls the use of TCP keepalive. + /// + /// This is ignored for Unix domain socket connections. Defaults to `true`. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn keepalives(&mut self, keepalives: bool) -> &mut Config { Arc::make_mut(&mut self.0).keepalives = keepalives; self } + /// Sets the amount of idle time before a keepalive packet is sent on the connection. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { Arc::make_mut(&mut self.0).keepalives_idle = keepalives_idle; self } + /// Sets the requirements of the session. + /// + /// This can be used to connect to the primary server in a clustered database rather than one of the read-only + /// secondary servers. Defaults to `Any`. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn target_session_attrs( &mut self, @@ -364,14 +386,9 @@ impl Config { Ok(()) } - pub fn handshake(&self, stream: S, tls_mode: T) -> Handshake - where - S: AsyncRead + AsyncWrite, - T: TlsMode, - { - Handshake(HandshakeFuture::new(stream, tls_mode, self.clone(), None)) - } - + /// Opens a connection to a PostgreSQL database. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn connect(&self, make_tls_mode: T) -> Connect where @@ -379,6 +396,17 @@ impl Config { { Connect(ConnectFuture::new(make_tls_mode, Ok(self.clone()))) } + + /// Connects to a PostgreSQL database over an arbitrary stream. + /// + /// All of the settings other than `user`, `password`, `dbname`, `options`, and `application` name are ignored. 
+ pub fn handshake(&self, stream: S, tls_mode: T) -> Handshake + where + S: AsyncRead + AsyncWrite, + T: TlsMode, + { + Handshake(HandshakeFuture::new(stream, tls_mode, self.clone(), None)) + } } impl FromStr for Config { From 5b045940f4fe739be96c5c49ff22a827eb7103b8 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 7 Jan 2019 21:45:09 -0800 Subject: [PATCH 107/819] Rename handshake to connect_raw --- tokio-postgres-native-tls/src/test.rs | 2 +- tokio-postgres-openssl/src/test.rs | 2 +- tokio-postgres/src/config.rs | 8 ++++---- tokio-postgres/src/lib.rs | 4 ++-- tokio-postgres/src/proto/connect_once.rs | 18 +++++++++--------- .../src/proto/{handshake.rs => connect_raw.rs} | 10 +++++----- tokio-postgres/src/proto/mod.rs | 4 ++-- tokio-postgres/tests/test/main.rs | 2 +- 8 files changed, 25 insertions(+), 25 deletions(-) rename tokio-postgres/src/proto/{handshake.rs => connect_raw.rs} (98%) diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index dda907f4b..cbb121870 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -17,7 +17,7 @@ where let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) - .and_then(|s| builder.handshake(s, tls)); + .and_then(|s| builder.connect_raw(s, tls)); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index 9a1238496..aa0e3804e 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -17,7 +17,7 @@ where let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) - .and_then(|s| builder.handshake(s, tls)); + .and_then(|s| builder.connect_raw(s, tls)); let (mut client, connection) = runtime.block_on(handshake).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 57553a71d..002dea7c5 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -17,10 +17,10 @@ use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "runtime")] use crate::proto::ConnectFuture; -use crate::proto::HandshakeFuture; +use crate::proto::ConnectRawFuture; #[cfg(feature = "runtime")] use crate::{Connect, MakeTlsMode, Socket}; -use crate::{Error, Handshake, TlsMode}; +use crate::{ConnectRaw, Error, TlsMode}; /// Properties required of a database. #[cfg(feature = "runtime")] @@ -400,12 +400,12 @@ impl Config { /// Connects to a PostgreSQL database over an arbitrary stream. /// /// All of the settings other than `user`, `password`, `dbname`, `options`, and `application` name are ignored. 
- pub fn handshake(&self, stream: S, tls_mode: T) -> Handshake + pub fn connect_raw(&self, stream: S, tls_mode: T) -> ConnectRaw where S: AsyncRead + AsyncWrite, T: TlsMode, { - Handshake(HandshakeFuture::new(stream, tls_mode, self.clone(), None)) + ConnectRaw(ConnectRawFuture::new(stream, tls_mode, self.clone(), None)) } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index e27ca1d82..6e4ee3c70 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -377,12 +377,12 @@ where } #[must_use = "futures do nothing unless polled"] -pub struct Handshake(proto::HandshakeFuture) +pub struct ConnectRaw(proto::ConnectRawFuture) where S: AsyncRead + AsyncWrite, T: TlsMode; -impl Future for Handshake +impl Future for ConnectRaw where S: AsyncRead + AsyncWrite, T: TlsMode, diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index c784e2d72..2f73df195 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -4,7 +4,7 @@ use futures::{try_ready, Async, Future, Poll, Stream}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; -use crate::proto::{Client, ConnectSocketFuture, Connection, HandshakeFuture, SimpleQueryStream}; +use crate::proto::{Client, ConnectRawFuture, ConnectSocketFuture, Connection, SimpleQueryStream}; use crate::{Config, Error, Socket, TargetSessionAttrs, TlsMode}; #[derive(StateMachineFuture)] @@ -18,7 +18,7 @@ where tls_mode: T, config: Config, }, - #[state_machine_future(transitions(Handshaking))] + #[state_machine_future(transitions(ConnectingRaw))] ConnectingSocket { future: ConnectSocketFuture, idx: usize, @@ -26,8 +26,8 @@ where config: Config, }, #[state_machine_future(transitions(CheckingSessionAttrs, Finished))] - Handshaking { - future: HandshakeFuture, + ConnectingRaw { + future: ConnectRawFuture, target_session_attrs: TargetSessionAttrs, }, #[state_machine_future(transitions(Finished))] @@ -63,15 +63,15 @@ where let socket = try_ready!(state.future.poll()); let state = state.take(); - transition!(Handshaking { + transition!(ConnectingRaw { target_session_attrs: state.config.0.target_session_attrs, - future: HandshakeFuture::new(socket, state.tls_mode, state.config, Some(state.idx)), + future: ConnectRawFuture::new(socket, state.tls_mode, state.config, Some(state.idx)), }) } - fn poll_handshaking<'a>( - state: &'a mut RentToOwn<'a, Handshaking>, - ) -> Poll, Error> { + fn poll_connecting_raw<'a>( + state: &'a mut RentToOwn<'a, ConnectingRaw>, + ) -> Poll, Error> { let (client, connection) = try_ready!(state.future.poll()); if let TargetSessionAttrs::ReadWrite = state.target_session_attrs { diff --git a/tokio-postgres/src/proto/handshake.rs b/tokio-postgres/src/proto/connect_raw.rs similarity index 98% rename from tokio-postgres/src/proto/handshake.rs rename to tokio-postgres/src/proto/connect_raw.rs index 1d245ae1e..23f14562e 100644 --- a/tokio-postgres/src/proto/handshake.rs +++ b/tokio-postgres/src/proto/connect_raw.rs @@ -15,7 +15,7 @@ use crate::proto::{Client, Connection, PostgresCodec, TlsFuture}; use crate::{ChannelBinding, Config, Error, TlsMode}; #[derive(StateMachineFuture)] -pub enum Handshake +pub enum ConnectRaw where S: AsyncRead + AsyncWrite, T: TlsMode, @@ -81,7 +81,7 @@ where Failed(Error), } -impl PollHandshake for Handshake +impl PollConnectRaw for ConnectRaw where S: AsyncRead + AsyncWrite, T: TlsMode, @@ -374,7 +374,7 @@ where } } -impl HandshakeFuture +impl ConnectRawFuture where S: AsyncRead + 
AsyncWrite, T: TlsMode, @@ -384,7 +384,7 @@ where tls_mode: T, config: Config, idx: Option, - ) -> HandshakeFuture { - Handshake::start(TlsFuture::new(stream, tls_mode), config, idx) + ) -> ConnectRawFuture { + ConnectRaw::start(TlsFuture::new(stream, tls_mode), config, idx) } } diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index aff4a0a94..796ded1e2 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -28,13 +28,13 @@ mod codec; mod connect; #[cfg(feature = "runtime")] mod connect_once; +mod connect_raw; #[cfg(feature = "runtime")] mod connect_socket; mod connection; mod copy_in; mod copy_out; mod execute; -mod handshake; mod idle; mod portal; mod prepare; @@ -57,13 +57,13 @@ pub use crate::proto::codec::PostgresCodec; pub use crate::proto::connect::ConnectFuture; #[cfg(feature = "runtime")] pub use crate::proto::connect_once::ConnectOnceFuture; +pub use crate::proto::connect_raw::ConnectRawFuture; #[cfg(feature = "runtime")] pub use crate::proto::connect_socket::ConnectSocketFuture; pub use crate::proto::connection::Connection; pub use crate::proto::copy_in::CopyInFuture; pub use crate::proto::copy_out::CopyOutStream; pub use crate::proto::execute::ExecuteFuture; -pub use crate::proto::handshake::HandshakeFuture; pub use crate::proto::portal::Portal; pub use crate::proto::prepare::PrepareFuture; pub use crate::proto::query::QueryStream; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index fc526510f..c770c9f4d 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -25,7 +25,7 @@ fn connect( let builder = s.parse::().unwrap(); TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) - .and_then(move |s| builder.handshake(s, NoTls)) + .and_then(move |s| builder.connect_raw(s, NoTls)) } fn smoke_test(s: &str) { From dfc614bed1ce88b5a978f1ffcd7dfe5a39c19b10 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 8 Jan 2019 13:45:54 -0800 Subject: [PATCH 108/819] Doc fixes --- tokio-postgres/src/config.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 002dea7c5..f290e5f70 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -22,13 +22,13 @@ use crate::proto::ConnectRawFuture; use crate::{Connect, MakeTlsMode, Socket}; use crate::{ConnectRaw, Error, TlsMode}; -/// Properties required of a database. +/// Properties required of a session. #[cfg(feature = "runtime")] #[derive(Debug, Copy, Clone, PartialEq)] pub enum TargetSessionAttrs { - /// No special permissions are required. + /// No special properties are required. Any, - /// The database must be writable. + /// The session must allow writes. 
ReadWrite, #[doc(hidden)] __NonExhaustive, @@ -128,7 +128,7 @@ pub(crate) struct Inner { /// ``` /// /// ```not_rust -/// postgresql://user@host1:1234,host2host3:5678?target_session_attrs=read-write +/// postgresql://user@host1:1234,host2,host3:5678?target_session_attrs=read-write /// ``` /// /// ```not_rust From 2d3b9bb1c6a51887db69caa407b236e9c8b6b214 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 13 Jan 2019 14:53:19 -0800 Subject: [PATCH 109/819] Move the TLS mode into config --- postgres/src/client.rs | 8 +- postgres/src/config.rs | 8 +- tokio-postgres-native-tls/src/test.rs | 14 +- tokio-postgres-openssl/src/test.rs | 18 +- tokio-postgres/Cargo.toml | 1 - tokio-postgres/src/config.rs | 48 ++- tokio-postgres/src/lib.rs | 42 +-- tokio-postgres/src/proto/cancel_query.rs | 31 +- tokio-postgres/src/proto/cancel_query_raw.rs | 21 +- tokio-postgres/src/proto/client.rs | 18 +- tokio-postgres/src/proto/connect.rs | 41 ++- tokio-postgres/src/proto/connect_once.rs | 32 +- tokio-postgres/src/proto/connect_raw.rs | 35 +-- tokio-postgres/src/proto/maybe_tls_stream.rs | 88 ++++++ tokio-postgres/src/proto/mod.rs | 2 + tokio-postgres/src/proto/tls.rs | 78 +++-- tokio-postgres/src/tls.rs | 290 +++---------------- tokio-postgres/tests/test/main.rs | 5 +- 18 files changed, 356 insertions(+), 424 deletions(-) create mode 100644 tokio-postgres/src/proto/maybe_tls_stream.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 7c5ff34c0..3b2d4f9c5 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -3,7 +3,7 @@ use std::io::{self, Read}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::Error; #[cfg(feature = "runtime")] -use tokio_postgres::{MakeTlsMode, Socket, TlsMode}; +use tokio_postgres::{MakeTlsConnect, Socket, TlsConnect}; #[cfg(feature = "runtime")] use crate::Config; @@ -15,10 +15,10 @@ impl Client { #[cfg(feature = "runtime")] pub fn connect(params: &str, tls_mode: T) -> Result where - T: MakeTlsMode + 'static + Send, - T::TlsMode: Send, + T: MakeTlsConnect + 'static + Send, + T::TlsConnect: Send, T::Stream: Send, - >::Future: Send, + >::Future: Send, { params.parse::()?.connect(tls_mode) } diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 4c6147b73..dab5f538e 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -4,7 +4,7 @@ use log::error; use std::path::Path; use std::str::FromStr; use std::time::Duration; -use tokio_postgres::{Error, MakeTlsMode, Socket, TargetSessionAttrs, TlsMode}; +use tokio_postgres::{Error, MakeTlsConnect, Socket, TargetSessionAttrs, TlsConnect}; use crate::{Client, RUNTIME}; @@ -94,10 +94,10 @@ impl Config { pub fn connect(&self, tls_mode: T) -> Result where - T: MakeTlsMode + 'static + Send, - T::TlsMode: Send, + T: MakeTlsConnect + 'static + Send, + T::TlsConnect: Send, T::Stream: Send, - >::Future: Send, + >::Future: Send, { let connect = self.0.connect(tls_mode); let (client, connection) = oneshot::spawn(connect, &RUNTIME.executor()).wait()?; diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index cbb121870..284311813 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -2,13 +2,13 @@ use futures::{Future, Stream}; use native_tls::{self, Certificate}; use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; -use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; +use tokio_postgres::TlsConnect; use crate::TlsConnector; fn smoke_test(s: &str, tls: T) where - T: TlsMode, + T: 
TlsConnect, T::Stream: 'static, { let mut runtime = Runtime::new().unwrap(); @@ -44,8 +44,8 @@ fn require() { .build() .unwrap(); smoke_test( - "user=ssl_user dbname=postgres", - RequireTls(TlsConnector::with_connector(connector, "localhost")), + "user=ssl_user dbname=postgres sslmode=require", + TlsConnector::with_connector(connector, "localhost"), ); } @@ -59,7 +59,7 @@ fn prefer() { .unwrap(); smoke_test( "user=ssl_user dbname=postgres", - PreferTls(TlsConnector::with_connector(connector, "localhost")), + TlsConnector::with_connector(connector, "localhost"), ); } @@ -72,7 +72,7 @@ fn scram_user() { .build() .unwrap(); smoke_test( - "user=scram_user password=password dbname=postgres", - RequireTls(TlsConnector::with_connector(connector, "localhost")), + "user=scram_user password=password dbname=postgres sslmode=require", + TlsConnector::with_connector(connector, "localhost"), ); } diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index aa0e3804e..2dc336c4e 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -2,13 +2,13 @@ use futures::{Future, Stream}; use openssl::ssl::{SslConnector, SslMethod}; use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; -use tokio_postgres::{self, PreferTls, RequireTls, TlsMode}; +use tokio_postgres::TlsConnect; use super::*; fn smoke_test(s: &str, tls: T) where - T: TlsMode, + T: TlsConnect, T::Stream: 'static, { let mut runtime = Runtime::new().unwrap(); @@ -41,8 +41,8 @@ fn require() { builder.set_ca_file("../test/server.crt").unwrap(); let ctx = builder.build(); smoke_test( - "user=ssl_user dbname=postgres", - RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), + "user=ssl_user dbname=postgres sslmode=require", + TlsConnector::new(ctx.configure().unwrap(), "localhost"), ); } @@ -53,7 +53,7 @@ fn prefer() { let ctx = builder.build(); smoke_test( "user=ssl_user dbname=postgres", - PreferTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), + TlsConnector::new(ctx.configure().unwrap(), "localhost"), ); } @@ -63,8 +63,8 @@ fn scram_user() { builder.set_ca_file("../test/server.crt").unwrap(); let ctx = builder.build(); smoke_test( - "user=scram_user password=password dbname=postgres", - RequireTls(TlsConnector::new(ctx.configure().unwrap(), "localhost")), + "user=scram_user password=password dbname=postgres sslmode=require", + TlsConnector::new(ctx.configure().unwrap(), "localhost"), ); } @@ -78,8 +78,8 @@ fn runtime() { let connector = MakeTlsConnector::new(builder.build()); let connect = tokio_postgres::connect( - "host=localhost port=5433 user=postgres", - RequireTls(connector), + "host=localhost port=5433 user=postgres sslmode=require", + connector, ); let (mut client, connection) = runtime.block_on(connect).unwrap(); let connection = connection.map_err(|e| panic!("{}", e)); diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 35a121e6a..d47a105cc 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -49,7 +49,6 @@ postgres-protocol = { version = "0.3.0", path = "../postgres-protocol" } state_machine_future = "0.1.7" tokio-codec = "0.1" tokio-io = "0.1" -void = "1.0" tokio-tcp = { version = "0.1", optional = true } futures-cpupool = { version = "0.1", optional = true } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index f290e5f70..b38c1a0c8 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -19,8 +19,8 @@ use tokio_io::{AsyncRead, AsyncWrite}; 
use crate::proto::ConnectFuture; use crate::proto::ConnectRawFuture; #[cfg(feature = "runtime")] -use crate::{Connect, MakeTlsMode, Socket}; -use crate::{ConnectRaw, Error, TlsMode}; +use crate::{Connect, MakeTlsConnect, Socket}; +use crate::{ConnectRaw, Error, TlsConnect}; /// Properties required of a session. #[cfg(feature = "runtime")] @@ -34,6 +34,17 @@ pub enum TargetSessionAttrs { __NonExhaustive, } +/// TLS configuration. +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum SslMode { + /// Do not use TLS. + Disable, + /// Attempt to connect with TLS but allow sessions without. + Prefer, + /// Require the use of TLS. + Require, +} + #[cfg(feature = "runtime")] #[derive(Debug, Clone, PartialEq)] pub(crate) enum Host { @@ -49,6 +60,7 @@ pub(crate) struct Inner { pub(crate) dbname: Option, pub(crate) options: Option, pub(crate) application_name: Option, + pub(crate) ssl_mode: SslMode, #[cfg(feature = "runtime")] pub(crate) host: Vec, #[cfg(feature = "runtime")] @@ -79,6 +91,8 @@ pub(crate) struct Inner { /// * `dbname` - The name of the database to connect to. Defaults to the username. /// * `options` - Command line options used to configure the server. /// * `application_name` - Sets the `application_name` parameter on the server. +/// * `sslmode` - Controls usage of TLS. If set to `disable`, TLS will not be used. If set to `prefer`, TLS will be used +/// if available, but not used otherwise. If set to `require`, TLS will be forced to be used. Defaults to `prefer`. /// * `host` - The host to connect to. On Unix platforms, if the host starts with a `/` character it is treated as the /// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting @@ -152,6 +166,7 @@ impl Config { dbname: None, options: None, application_name: None, + ssl_mode: SslMode::Prefer, #[cfg(feature = "runtime")] host: vec![], #[cfg(feature = "runtime")] @@ -204,6 +219,14 @@ impl Config { self } + /// Sets the SSL configuration. + /// + /// Defaults to `prefer`. + pub fn ssl_mode(&mut self, ssl_mode: SslMode) -> &mut Config { + Arc::make_mut(&mut self.0).ssl_mode = ssl_mode; + self + } + /// Adds a host to the configuration. /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix @@ -320,6 +343,15 @@ impl Config { "application_name" => { self.application_name(&value); } + "sslmode" => { + let mode = match value { + "disable" => SslMode::Disable, + "prefer" => SslMode::Prefer, + "require" => SslMode::Require, + _ => return Err(Error::config_parse(Box::new(InvalidValue("sslmode")))), + }; + self.ssl_mode(mode); + } #[cfg(feature = "runtime")] "host" => { for host in value.split(',') { @@ -390,22 +422,22 @@ impl Config { /// /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] - pub fn connect(&self, make_tls_mode: T) -> Connect + pub fn connect(&self, tls: T) -> Connect where - T: MakeTlsMode, + T: MakeTlsConnect, { - Connect(ConnectFuture::new(make_tls_mode, Ok(self.clone()))) + Connect(ConnectFuture::new(tls, Ok(self.clone()))) } /// Connects to a PostgreSQL database over an arbitrary stream. /// /// All of the settings other than `user`, `password`, `dbname`, `options`, and `application` name are ignored. 
- pub fn connect_raw(&self, stream: S, tls_mode: T) -> ConnectRaw + pub fn connect_raw(&self, stream: S, tls: T) -> ConnectRaw where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { - ConnectRaw(ConnectRawFuture::new(stream, tls_mode, self.clone(), None)) + ConnectRaw(ConnectRawFuture::new(stream, tls, self.clone(), None)) } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 6e4ee3c70..0019337ea 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -127,11 +127,11 @@ fn next_portal() -> String { /// /// [`Config`]: ./Config.t.html #[cfg(feature = "runtime")] -pub fn connect(config: &str, tls_mode: T) -> Connect +pub fn connect(config: &str, tls: T) -> Connect where - T: MakeTlsMode, + T: MakeTlsConnect, { - Connect(proto::ConnectFuture::new(tls_mode, config.parse())) + Connect(proto::ConnectFuture::new(tls, config.parse())) } /// An asynchronous PostgreSQL client. @@ -250,7 +250,7 @@ impl Client { #[cfg(feature = "runtime")] pub fn cancel_query(&mut self, make_tls_mode: T) -> CancelQuery where - T: MakeTlsMode, + T: MakeTlsConnect, { CancelQuery(self.0.cancel_query(make_tls_mode)) } @@ -260,7 +260,7 @@ impl Client { pub fn cancel_query_raw(&mut self, stream: S, tls_mode: T) -> CancelQueryRaw where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { CancelQueryRaw(self.0.cancel_query_raw(stream, tls_mode)) } @@ -291,11 +291,12 @@ impl Client { /// `Connection` implements `Future`, and only resolves when the connection is closed, either because a fatal error has /// occurred, or because its associated `Client` has dropped and all outstanding work has completed. #[must_use = "futures do nothing unless polled"] -pub struct Connection(proto::Connection); +pub struct Connection(proto::Connection>); -impl Connection +impl Connection where S: AsyncRead + AsyncWrite, + T: AsyncRead + AsyncWrite, { /// Returns the value of a runtime parameter for this connection. 
pub fn parameter(&self, name: &str) -> Option<&str> { @@ -311,9 +312,10 @@ where } } -impl Future for Connection +impl Future for Connection where S: AsyncRead + AsyncWrite, + T: AsyncRead + AsyncWrite, { type Item = (); type Error = Error; @@ -342,12 +344,12 @@ pub enum AsyncMessage { pub struct CancelQueryRaw(proto::CancelQueryRawFuture) where S: AsyncRead + AsyncWrite, - T: TlsMode; + T: TlsConnect; impl Future for CancelQueryRaw where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { type Item = (); type Error = Error; @@ -361,12 +363,12 @@ where #[must_use = "futures do nothing unless polled"] pub struct CancelQuery(proto::CancelQueryFuture) where - T: MakeTlsMode; + T: MakeTlsConnect; #[cfg(feature = "runtime")] impl Future for CancelQuery where - T: MakeTlsMode, + T: MakeTlsConnect, { type Item = (); type Error = Error; @@ -380,17 +382,17 @@ where pub struct ConnectRaw(proto::ConnectRawFuture) where S: AsyncRead + AsyncWrite, - T: TlsMode; + T: TlsConnect; impl Future for ConnectRaw where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { - type Item = (Client, Connection); + type Item = (Client, Connection); type Error = Error; - fn poll(&mut self) -> Poll<(Client, Connection), Error> { + fn poll(&mut self) -> Poll<(Client, Connection), Error> { let (client, connection) = try_ready!(self.0.poll()); Ok(Async::Ready((Client(client), Connection(connection)))) @@ -401,17 +403,17 @@ where #[must_use = "futures do nothing unless polled"] pub struct Connect(proto::ConnectFuture) where - T: MakeTlsMode; + T: MakeTlsConnect; #[cfg(feature = "runtime")] impl Future for Connect where - T: MakeTlsMode, + T: MakeTlsConnect, { - type Item = (Client, Connection); + type Item = (Client, Connection); type Error = Error; - fn poll(&mut self) -> Poll<(Client, Connection), Error> { + fn poll(&mut self) -> Poll<(Client, Connection), Error> { let (client, connection) = try_ready!(self.0.poll()); Ok(Async::Ready((Client(client), Connection(connection)))) diff --git a/tokio-postgres/src/proto/cancel_query.rs b/tokio-postgres/src/proto/cancel_query.rs index 61484803c..1a7377c27 100644 --- a/tokio-postgres/src/proto/cancel_query.rs +++ b/tokio-postgres/src/proto/cancel_query.rs @@ -3,16 +3,16 @@ use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; use crate::proto::{CancelQueryRawFuture, ConnectSocketFuture}; -use crate::{Config, Error, Host, MakeTlsMode, Socket}; +use crate::{Config, Error, Host, MakeTlsConnect, Socket, SslMode}; #[derive(StateMachineFuture)] pub enum CancelQuery where - T: MakeTlsMode, + T: MakeTlsConnect, { #[state_machine_future(start, transitions(ConnectingSocket))] Start { - make_tls_mode: T, + tls: T, idx: Option, config: Config, process_id: i32, @@ -21,13 +21,14 @@ where #[state_machine_future(transitions(Canceling))] ConnectingSocket { future: ConnectSocketFuture, - tls_mode: T::TlsMode, + mode: SslMode, + tls: T::TlsConnect, process_id: i32, secret_key: i32, }, #[state_machine_future(transitions(Finished))] Canceling { - future: CancelQueryRawFuture, + future: CancelQueryRawFuture, }, #[state_machine_future(ready)] Finished(()), @@ -37,7 +38,7 @@ where impl PollCancelQuery for CancelQuery where - T: MakeTlsMode, + T: MakeTlsConnect, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let mut state = state.take(); @@ -52,14 +53,15 @@ where #[cfg(unix)] Host::Unix(_) => "", }; - let tls_mode = state - .make_tls_mode - .make_tls_mode(hostname) + let tls = state + .tls + .make_tls_connect(hostname) .map_err(|e| 
Error::tls(e.into()))?; transition!(ConnectingSocket { + mode: state.config.0.ssl_mode, future: ConnectSocketFuture::new(state.config, idx), - tls_mode, + tls, process_id: state.process_id, secret_key: state.secret_key, }) @@ -74,7 +76,8 @@ where transition!(Canceling { future: CancelQueryRawFuture::new( socket, - state.tls_mode, + state.mode, + state.tls, state.process_id, state.secret_key ), @@ -91,15 +94,15 @@ where impl CancelQueryFuture where - T: MakeTlsMode, + T: MakeTlsConnect, { pub fn new( - make_tls_mode: T, + tls: T, idx: Option, config: Config, process_id: i32, secret_key: i32, ) -> CancelQueryFuture { - CancelQuery::start(make_tls_mode, idx, config, process_id, secret_key) + CancelQuery::start(tls, idx, config, process_id, secret_key) } } diff --git a/tokio-postgres/src/proto/cancel_query_raw.rs b/tokio-postgres/src/proto/cancel_query_raw.rs index ae2aee451..522fe318b 100644 --- a/tokio-postgres/src/proto/cancel_query_raw.rs +++ b/tokio-postgres/src/proto/cancel_query_raw.rs @@ -5,14 +5,14 @@ use tokio_io::io::{self, Flush, WriteAll}; use tokio_io::{AsyncRead, AsyncWrite}; use crate::error::Error; -use crate::proto::TlsFuture; -use crate::TlsMode; +use crate::proto::{MaybeTlsStream, TlsFuture}; +use crate::{SslMode, TlsConnect}; #[derive(StateMachineFuture)] pub enum CancelQueryRaw where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { #[state_machine_future(start, transitions(SendingCancel))] Start { @@ -22,10 +22,12 @@ where }, #[state_machine_future(transitions(FlushingCancel))] SendingCancel { - future: WriteAll>, + future: WriteAll, Vec>, }, #[state_machine_future(transitions(Finished))] - FlushingCancel { future: Flush }, + FlushingCancel { + future: Flush>, + }, #[state_machine_future(ready)] Finished(()), #[state_machine_future(error)] @@ -35,7 +37,7 @@ where impl PollCancelQueryRaw for CancelQueryRaw where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let (stream, _) = try_ready!(state.future.poll()); @@ -69,14 +71,15 @@ where impl CancelQueryRawFuture where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { pub fn new( stream: S, - tls_mode: T, + mode: SslMode, + tls: T, process_id: i32, secret_key: i32, ) -> CancelQueryRawFuture { - CancelQueryRaw::start(TlsFuture::new(stream, tls_mode), process_id, secret_key) + CancelQueryRaw::start(TlsFuture::new(stream, mode, tls), process_id, secret_key) } } diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index 6708e528f..a9e0d532e 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -25,9 +25,9 @@ use crate::proto::statement::Statement; use crate::proto::CancelQueryFuture; use crate::proto::CancelQueryRawFuture; use crate::types::{IsNull, Oid, ToSql, Type}; -use crate::{Config, Error, TlsMode}; +use crate::{Config, Error, TlsConnect}; #[cfg(feature = "runtime")] -use crate::{MakeTlsMode, Socket}; +use crate::{MakeTlsConnect, Socket}; pub struct PendingRequest(Result<(RequestMessages, IdleGuard), Error>); @@ -247,7 +247,7 @@ impl Client { #[cfg(feature = "runtime")] pub fn cancel_query(&self, make_tls_mode: T) -> CancelQueryFuture where - T: MakeTlsMode, + T: MakeTlsConnect, { CancelQueryFuture::new( make_tls_mode, @@ -258,12 +258,18 @@ impl Client { ) } - pub fn cancel_query_raw(&self, stream: S, tls_mode: T) -> CancelQueryRawFuture + pub fn cancel_query_raw(&self, stream: S, mode: T) -> CancelQueryRawFuture where S: AsyncRead + AsyncWrite, - T: 
TlsMode, + T: TlsConnect, { - CancelQueryRawFuture::new(stream, tls_mode, self.0.process_id, self.0.secret_key) + CancelQueryRawFuture::new( + stream, + self.0.config.0.ssl_mode, + mode, + self.0.process_id, + self.0.secret_key, + ) } fn close(&self, ty: u8, name: &str) { diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index afc1d012b..1bc3f481a 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -1,35 +1,35 @@ use futures::{Async, Future, Poll}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use crate::proto::{Client, ConnectOnceFuture, Connection}; -use crate::{Config, Error, Host, MakeTlsMode, Socket}; +use crate::proto::{Client, ConnectOnceFuture, Connection, MaybeTlsStream}; +use crate::{Config, Error, Host, MakeTlsConnect, Socket}; #[derive(StateMachineFuture)] pub enum Connect where - T: MakeTlsMode, + T: MakeTlsConnect, { #[state_machine_future(start, transitions(Connecting))] Start { - make_tls_mode: T, + tls: T, config: Result, }, #[state_machine_future(transitions(Finished))] Connecting { - future: ConnectOnceFuture, + future: ConnectOnceFuture, idx: usize, - make_tls_mode: T, + tls: T, config: Config, }, #[state_machine_future(ready)] - Finished((Client, Connection)), + Finished((Client, Connection>)), #[state_machine_future(error)] Failed(Error), } impl PollConnect for Connect where - T: MakeTlsMode, + T: MakeTlsConnect, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let mut state = state.take(); @@ -50,15 +50,15 @@ where #[cfg(unix)] Host::Unix(_) => "", }; - let tls_mode = state - .make_tls_mode - .make_tls_mode(hostname) + let tls = state + .tls + .make_tls_connect(hostname) .map_err(|e| Error::tls(e.into()))?; transition!(Connecting { - future: ConnectOnceFuture::new(0, tls_mode, config.clone()), + future: ConnectOnceFuture::new(0, tls, config.clone()), idx: 0, - make_tls_mode: state.make_tls_mode, + tls: state.tls, config, }) } @@ -84,13 +84,12 @@ where #[cfg(unix)] Host::Unix(_) => "", }; - let tls_mode = state - .make_tls_mode - .make_tls_mode(hostname) + let tls = state + .tls + .make_tls_connect(hostname) .map_err(|e| Error::tls(e.into()))?; - state.future = - ConnectOnceFuture::new(state.idx, tls_mode, state.config.clone()); + state.future = ConnectOnceFuture::new(state.idx, tls, state.config.clone()); } } } @@ -99,9 +98,9 @@ where impl ConnectFuture where - T: MakeTlsMode, + T: MakeTlsConnect, { - pub fn new(make_tls_mode: T, config: Result) -> ConnectFuture { - Connect::start(make_tls_mode, config) + pub fn new(tls: T, config: Result) -> ConnectFuture { + Connect::start(tls, config) } } diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 2f73df195..750c9ba26 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -4,25 +4,23 @@ use futures::{try_ready, Async, Future, Poll, Stream}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; -use crate::proto::{Client, ConnectRawFuture, ConnectSocketFuture, Connection, SimpleQueryStream}; -use crate::{Config, Error, Socket, TargetSessionAttrs, TlsMode}; +use crate::proto::{ + Client, ConnectRawFuture, ConnectSocketFuture, Connection, MaybeTlsStream, SimpleQueryStream, +}; +use crate::{Config, Error, Socket, TargetSessionAttrs, TlsConnect}; #[derive(StateMachineFuture)] pub enum ConnectOnce where - T: TlsMode, + T: TlsConnect, { #[state_machine_future(start, 
transitions(ConnectingSocket))] - Start { - idx: usize, - tls_mode: T, - config: Config, - }, + Start { idx: usize, tls: T, config: Config }, #[state_machine_future(transitions(ConnectingRaw))] ConnectingSocket { future: ConnectSocketFuture, idx: usize, - tls_mode: T, + tls: T, config: Config, }, #[state_machine_future(transitions(CheckingSessionAttrs, Finished))] @@ -34,17 +32,17 @@ where CheckingSessionAttrs { stream: SimpleQueryStream, client: Client, - connection: Connection, + connection: Connection>, }, #[state_machine_future(ready)] - Finished((Client, Connection)), + Finished((Client, Connection>)), #[state_machine_future(error)] Failed(Error), } impl PollConnectOnce for ConnectOnce where - T: TlsMode, + T: TlsConnect, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let state = state.take(); @@ -52,7 +50,7 @@ where transition!(ConnectingSocket { future: ConnectSocketFuture::new(state.config.clone(), state.idx), idx: state.idx, - tls_mode: state.tls_mode, + tls: state.tls, config: state.config, }) } @@ -65,7 +63,7 @@ where transition!(ConnectingRaw { target_session_attrs: state.config.0.target_session_attrs, - future: ConnectRawFuture::new(socket, state.tls_mode, state.config, Some(state.idx)), + future: ConnectRawFuture::new(socket, state.tls, state.config, Some(state.idx)), }) } @@ -111,9 +109,9 @@ where impl ConnectOnceFuture where - T: TlsMode, + T: TlsConnect, { - pub fn new(idx: usize, tls_mode: T, config: Config) -> ConnectOnceFuture { - ConnectOnce::start(idx, tls_mode, config) + pub fn new(idx: usize, tls: T, config: Config) -> ConnectOnceFuture { + ConnectOnce::start(idx, tls, config) } } diff --git a/tokio-postgres/src/proto/connect_raw.rs b/tokio-postgres/src/proto/connect_raw.rs index 23f14562e..12efae333 100644 --- a/tokio-postgres/src/proto/connect_raw.rs +++ b/tokio-postgres/src/proto/connect_raw.rs @@ -11,14 +11,14 @@ use std::collections::HashMap; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; -use crate::proto::{Client, Connection, PostgresCodec, TlsFuture}; -use crate::{ChannelBinding, Config, Error, TlsMode}; +use crate::proto::{Client, Connection, MaybeTlsStream, PostgresCodec, TlsFuture}; +use crate::{ChannelBinding, Config, Error, TlsConnect}; #[derive(StateMachineFuture)] pub enum ConnectRaw where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { #[state_machine_future(start, transitions(SendingStartup))] Start { @@ -28,47 +28,47 @@ where }, #[state_machine_future(transitions(ReadingAuth))] SendingStartup { - future: sink::Send>, + future: sink::Send, PostgresCodec>>, config: Config, idx: Option, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingInfo, SendingPassword, SendingSasl))] ReadingAuth { - stream: Framed, + stream: Framed, PostgresCodec>, config: Config, idx: Option, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingAuthCompletion))] SendingPassword { - future: sink::Send>, + future: sink::Send, PostgresCodec>>, config: Config, idx: Option, }, #[state_machine_future(transitions(ReadingSasl))] SendingSasl { - future: sink::Send>, + future: sink::Send, PostgresCodec>>, scram: ScramSha256, config: Config, idx: Option, }, #[state_machine_future(transitions(SendingSasl, ReadingAuthCompletion))] ReadingSasl { - stream: Framed, + stream: Framed, PostgresCodec>, scram: ScramSha256, config: Config, idx: Option, }, #[state_machine_future(transitions(ReadingInfo))] ReadingAuthCompletion { - stream: Framed, + stream: Framed, PostgresCodec>, config: 
Config, idx: Option, }, #[state_machine_future(transitions(Finished))] ReadingInfo { - stream: Framed, + stream: Framed, PostgresCodec>, process_id: i32, secret_key: i32, parameters: HashMap, @@ -76,7 +76,7 @@ where idx: Option, }, #[state_machine_future(ready)] - Finished((Client, Connection)), + Finished((Client, Connection>)), #[state_machine_future(error)] Failed(Error), } @@ -84,7 +84,7 @@ where impl PollConnectRaw for ConnectRaw where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let (stream, channel_binding) = try_ready!(state.future.poll()); @@ -377,14 +377,9 @@ where impl ConnectRawFuture where S: AsyncRead + AsyncWrite, - T: TlsMode, + T: TlsConnect, { - pub fn new( - stream: S, - tls_mode: T, - config: Config, - idx: Option, - ) -> ConnectRawFuture { - ConnectRaw::start(TlsFuture::new(stream, tls_mode), config, idx) + pub fn new(stream: S, tls: T, config: Config, idx: Option) -> ConnectRawFuture { + ConnectRaw::start(TlsFuture::new(stream, config.0.ssl_mode, tls), config, idx) } } diff --git a/tokio-postgres/src/proto/maybe_tls_stream.rs b/tokio-postgres/src/proto/maybe_tls_stream.rs new file mode 100644 index 000000000..928674f28 --- /dev/null +++ b/tokio-postgres/src/proto/maybe_tls_stream.rs @@ -0,0 +1,88 @@ +use bytes::{Buf, BufMut}; +use futures::Poll; +use std::io::{self, Read, Write}; +use tokio_io::{AsyncRead, AsyncWrite}; + +pub enum MaybeTlsStream { + Raw(T), + Tls(U), +} + +impl Read for MaybeTlsStream +where + T: Read, + U: Read, +{ + fn read(&mut self, buf: &mut [u8]) -> io::Result { + match self { + MaybeTlsStream::Raw(s) => s.read(buf), + MaybeTlsStream::Tls(s) => s.read(buf), + } + } +} + +impl AsyncRead for MaybeTlsStream +where + T: AsyncRead, + U: AsyncRead, +{ + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + match self { + MaybeTlsStream::Raw(s) => s.prepare_uninitialized_buffer(buf), + MaybeTlsStream::Tls(s) => s.prepare_uninitialized_buffer(buf), + } + } + + fn read_buf(&mut self, buf: &mut B) -> Poll + where + B: BufMut, + { + match self { + MaybeTlsStream::Raw(s) => s.read_buf(buf), + MaybeTlsStream::Tls(s) => s.read_buf(buf), + } + } +} + +impl Write for MaybeTlsStream +where + T: Write, + U: Write, +{ + fn write(&mut self, buf: &[u8]) -> io::Result { + match self { + MaybeTlsStream::Raw(s) => s.write(buf), + MaybeTlsStream::Tls(s) => s.write(buf), + } + } + + fn flush(&mut self) -> io::Result<()> { + match self { + MaybeTlsStream::Raw(s) => s.flush(), + MaybeTlsStream::Tls(s) => s.flush(), + } + } +} + +impl AsyncWrite for MaybeTlsStream +where + T: AsyncWrite, + U: AsyncWrite, +{ + fn shutdown(&mut self) -> Poll<(), io::Error> { + match self { + MaybeTlsStream::Raw(s) => s.shutdown(), + MaybeTlsStream::Tls(s) => s.shutdown(), + } + } + + fn write_buf(&mut self, buf: &mut B) -> Poll + where + B: Buf, + { + match self { + MaybeTlsStream::Raw(s) => s.write_buf(buf), + MaybeTlsStream::Tls(s) => s.write_buf(buf), + } + } +} diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 796ded1e2..7667901c3 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -36,6 +36,7 @@ mod copy_in; mod copy_out; mod execute; mod idle; +mod maybe_tls_stream; mod portal; mod prepare; mod query; @@ -64,6 +65,7 @@ pub use crate::proto::connection::Connection; pub use crate::proto::copy_in::CopyInFuture; pub use crate::proto::copy_out::CopyOutStream; pub use crate::proto::execute::ExecuteFuture; +pub use 
crate::proto::maybe_tls_stream::MaybeTlsStream; pub use crate::proto::portal::Portal; pub use crate::proto::prepare::PrepareFuture; pub use crate::proto::query::QueryStream; diff --git a/tokio-postgres/src/proto/tls.rs b/tokio-postgres/src/proto/tls.rs index 64a217251..e4274a6e9 100644 --- a/tokio-postgres/src/proto/tls.rs +++ b/tokio-postgres/src/proto/tls.rs @@ -4,54 +4,65 @@ use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use tokio_io::io::{self, ReadExact, WriteAll}; use tokio_io::{AsyncRead, AsyncWrite}; -use crate::{ChannelBinding, Error, TlsMode}; +use crate::proto::MaybeTlsStream; +use crate::tls::private::ForcePrivateApi; +use crate::{ChannelBinding, Error, SslMode, TlsConnect}; #[derive(StateMachineFuture)] pub enum Tls where - T: TlsMode, + T: TlsConnect, S: AsyncRead + AsyncWrite, { - #[state_machine_future(start, transitions(SendingTls, ConnectingTls))] - Start { stream: S, tls_mode: T }, + #[state_machine_future(start, transitions(SendingTls, Ready))] + Start { stream: S, mode: SslMode, tls: T }, #[state_machine_future(transitions(ReadingTls))] SendingTls { future: WriteAll>, - tls_mode: T, + mode: SslMode, + tls: T, }, - #[state_machine_future(transitions(ConnectingTls))] + #[state_machine_future(transitions(ConnectingTls, Ready))] ReadingTls { future: ReadExact, - tls_mode: T, + mode: SslMode, + tls: T, }, #[state_machine_future(transitions(Ready))] ConnectingTls { future: T::Future }, #[state_machine_future(ready)] - Ready((T::Stream, ChannelBinding)), + Ready((MaybeTlsStream, ChannelBinding)), #[state_machine_future(error)] Failed(Error), } impl PollTls for Tls where - T: TlsMode, + T: TlsConnect, S: AsyncRead + AsyncWrite, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { let state = state.take(); - if state.tls_mode.request_tls() { - let mut buf = vec![]; - frontend::ssl_request(&mut buf); + match state.mode { + SslMode::Disable => transition!(Ready(( + MaybeTlsStream::Raw(state.stream), + ChannelBinding::none() + ))), + SslMode::Prefer if !state.tls.can_connect(ForcePrivateApi) => transition!(Ready(( + MaybeTlsStream::Raw(state.stream), + ChannelBinding::none() + ))), + SslMode::Prefer | SslMode::Require => { + let mut buf = vec![]; + frontend::ssl_request(&mut buf); - transition!(SendingTls { - future: io::write_all(state.stream, buf), - tls_mode: state.tls_mode, - }) - } else { - transition!(ConnectingTls { - future: state.tls_mode.handle_tls(false, state.stream), - }) + transition!(SendingTls { + future: io::write_all(state.stream, buf), + mode: state.mode, + tls: state.tls, + }) + } } } @@ -62,7 +73,8 @@ where let state = state.take(); transition!(ReadingTls { future: io::read_exact(stream, [0]), - tls_mode: state.tls_mode, + mode: state.mode, + tls: state.tls, }) } @@ -72,26 +84,32 @@ where let (stream, buf) = try_ready!(state.future.poll().map_err(Error::io)); let state = state.take(); - let use_tls = buf[0] == b'S'; - transition!(ConnectingTls { - future: state.tls_mode.handle_tls(use_tls, stream) - }) + if buf[0] == b'S' { + transition!(ConnectingTls { + future: state.tls.connect(stream), + }) + } else if state.mode == SslMode::Require { + Err(Error::tls("server does not support TLS".into())) + } else { + transition!(Ready((MaybeTlsStream::Raw(stream), ChannelBinding::none()))) + } } fn poll_connecting_tls<'a>( state: &'a mut RentToOwn<'a, ConnectingTls>, ) -> Poll, Error> { - let t = try_ready!(state.future.poll().map_err(|e| Error::tls(e.into()))); - transition!(Ready(t)) + let (stream, channel_binding) = + 
try_ready!(state.future.poll().map_err(|e| Error::tls(e.into()))); + transition!(Ready((MaybeTlsStream::Tls(stream), channel_binding))) } } impl TlsFuture where - T: TlsMode, + T: TlsConnect, S: AsyncRead + AsyncWrite, { - pub fn new(stream: S, tls_mode: T) -> TlsFuture { - Tls::start(stream, tls_mode) + pub fn new(stream: S, mode: SslMode, tls: T) -> TlsFuture { + Tls::start(stream, mode, tls) } } diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index a5170dd48..7eabfab34 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -1,11 +1,13 @@ -use bytes::{Buf, BufMut}; use futures::future::{self, FutureResult}; -use futures::{try_ready, Async, Future, Poll}; +use futures::{Future, Poll}; use std::error::Error; use std::fmt; use std::io::{self, Read, Write}; use tokio_io::{AsyncRead, AsyncWrite}; -use void::Void; + +pub(crate) mod private { + pub struct ForcePrivateApi; +} pub struct ChannelBinding { pub(crate) tls_server_end_point: Option>, @@ -25,25 +27,6 @@ impl ChannelBinding { } } -#[cfg(feature = "runtime")] -pub trait MakeTlsMode { - type Stream: AsyncRead + AsyncWrite; - type TlsMode: TlsMode; - type Error: Into>; - - fn make_tls_mode(&mut self, domain: &str) -> Result; -} - -pub trait TlsMode { - type Stream: AsyncRead + AsyncWrite; - type Error: Into>; - type Future: Future; - - fn request_tls(&self) -> bool; - - fn handle_tls(self, use_tls: bool, stream: S) -> Self::Future; -} - #[cfg(feature = "runtime")] pub trait MakeTlsConnect { type Stream: AsyncRead + AsyncWrite; @@ -59,271 +42,74 @@ pub trait TlsConnect { type Future: Future; fn connect(self, stream: S) -> Self::Future; -} - -#[derive(Debug, Copy, Clone)] -pub struct NoTls; - -#[cfg(feature = "runtime")] -impl MakeTlsMode for NoTls -where - S: AsyncRead + AsyncWrite, -{ - type Stream = S; - type TlsMode = NoTls; - type Error = Void; - - fn make_tls_mode(&mut self, _: &str) -> Result { - Ok(NoTls) - } -} - -impl TlsMode for NoTls -where - S: AsyncRead + AsyncWrite, -{ - type Stream = S; - type Error = Void; - type Future = FutureResult<(S, ChannelBinding), Void>; - - fn request_tls(&self) -> bool { - false - } - - fn handle_tls(self, use_tls: bool, stream: S) -> FutureResult<(S, ChannelBinding), Void> { - debug_assert!(!use_tls); - future::ok((stream, ChannelBinding::none())) + #[doc(hidden)] + fn can_connect(&self, _: private::ForcePrivateApi) -> bool { + true } } #[derive(Debug, Copy, Clone)] -pub struct PreferTls(pub T); +pub struct NoTls; #[cfg(feature = "runtime")] -impl MakeTlsMode for PreferTls -where - T: MakeTlsConnect, - S: AsyncRead + AsyncWrite, -{ - type Stream = MaybeTlsStream; - type TlsMode = PreferTls; - type Error = T::Error; +impl MakeTlsConnect for NoTls where { + type Stream = NoTlsStream; + type TlsConnect = NoTls; + type Error = NoTlsError; - fn make_tls_mode(&mut self, domain: &str) -> Result, T::Error> { - self.0.make_tls_connect(domain).map(PreferTls) + fn make_tls_connect(&mut self, _: &str) -> Result { + Ok(NoTls) } } -impl TlsMode for PreferTls -where - T: TlsConnect, - S: AsyncRead + AsyncWrite, -{ - type Stream = MaybeTlsStream; - type Error = T::Error; - type Future = PreferTlsFuture; - - fn request_tls(&self) -> bool { - true - } - - fn handle_tls(self, use_tls: bool, stream: S) -> PreferTlsFuture { - let f = if use_tls { - PreferTlsFutureInner::Tls(self.0.connect(stream)) - } else { - PreferTlsFutureInner::Raw(Some(stream)) - }; +impl TlsConnect for NoTls { + type Stream = NoTlsStream; + type Error = NoTlsError; + type Future = FutureResult<(NoTlsStream, 
ChannelBinding), NoTlsError>; - PreferTlsFuture(f) + fn connect(self, _: S) -> FutureResult<(NoTlsStream, ChannelBinding), NoTlsError> { + future::err(NoTlsError(())) } -} - -enum PreferTlsFutureInner { - Tls(F), - Raw(Option), -} -pub struct PreferTlsFuture(PreferTlsFutureInner); - -impl Future for PreferTlsFuture -where - F: Future, -{ - type Item = (MaybeTlsStream, ChannelBinding); - type Error = F::Error; - - fn poll(&mut self) -> Poll<(MaybeTlsStream, ChannelBinding), F::Error> { - match &mut self.0 { - PreferTlsFutureInner::Tls(f) => { - let (stream, channel_binding) = try_ready!(f.poll()); - Ok(Async::Ready((MaybeTlsStream::Tls(stream), channel_binding))) - } - PreferTlsFutureInner::Raw(s) => Ok(Async::Ready(( - MaybeTlsStream::Raw(s.take().expect("future polled after completion")), - ChannelBinding::none(), - ))), - } + fn can_connect(&self, _: private::ForcePrivateApi) -> bool { + false } } -pub enum MaybeTlsStream { - Tls(T), - Raw(U), -} +pub enum NoTlsStream {} -impl Read for MaybeTlsStream -where - T: Read, - U: Read, -{ - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match self { - MaybeTlsStream::Tls(s) => s.read(buf), - MaybeTlsStream::Raw(s) => s.read(buf), - } +impl Read for NoTlsStream { + fn read(&mut self, _: &mut [u8]) -> io::Result { + match *self {} } } -impl AsyncRead for MaybeTlsStream -where - T: AsyncRead, - U: AsyncRead, -{ - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - match self { - MaybeTlsStream::Tls(s) => s.prepare_uninitialized_buffer(buf), - MaybeTlsStream::Raw(s) => s.prepare_uninitialized_buffer(buf), - } - } - - fn read_buf(&mut self, buf: &mut B) -> Poll - where - B: BufMut, - { - match self { - MaybeTlsStream::Tls(s) => s.read_buf(buf), - MaybeTlsStream::Raw(s) => s.read_buf(buf), - } - } -} +impl AsyncRead for NoTlsStream {} -impl Write for MaybeTlsStream -where - T: Write, - U: Write, -{ - fn write(&mut self, buf: &[u8]) -> io::Result { - match self { - MaybeTlsStream::Tls(s) => s.write(buf), - MaybeTlsStream::Raw(s) => s.write(buf), - } +impl Write for NoTlsStream { + fn write(&mut self, _: &[u8]) -> io::Result { + match *self {} } fn flush(&mut self) -> io::Result<()> { - match self { - MaybeTlsStream::Tls(s) => s.flush(), - MaybeTlsStream::Raw(s) => s.flush(), - } + match *self {} } } -impl AsyncWrite for MaybeTlsStream -where - T: AsyncWrite, - U: AsyncWrite, -{ +impl AsyncWrite for NoTlsStream { fn shutdown(&mut self) -> Poll<(), io::Error> { - match self { - MaybeTlsStream::Tls(s) => s.shutdown(), - MaybeTlsStream::Raw(s) => s.shutdown(), - } - } - - fn write_buf(&mut self, buf: &mut B) -> Poll - where - B: Buf, - { - match self { - MaybeTlsStream::Tls(s) => s.write_buf(buf), - MaybeTlsStream::Raw(s) => s.write_buf(buf), - } - } -} - -#[derive(Debug, Copy, Clone)] -pub struct RequireTls(pub T); - -#[cfg(feature = "runtime")] -impl MakeTlsMode for RequireTls -where - T: MakeTlsConnect, -{ - type Stream = T::Stream; - type TlsMode = RequireTls; - type Error = T::Error; - - fn make_tls_mode(&mut self, domain: &str) -> Result, T::Error> { - self.0.make_tls_connect(domain).map(RequireTls) - } -} - -impl TlsMode for RequireTls -where - T: TlsConnect, -{ - type Stream = T::Stream; - type Error = Box; - type Future = RequireTlsFuture; - - fn request_tls(&self) -> bool { - true - } - - fn handle_tls(self, use_tls: bool, stream: S) -> RequireTlsFuture { - let f = if use_tls { - Ok(self.0.connect(stream)) - } else { - Err(TlsUnsupportedError(()).into()) - }; - - RequireTlsFuture { f: Some(f) } + match *self {} } } 
#[derive(Debug)] -pub struct TlsUnsupportedError(()); +pub struct NoTlsError(()); -impl fmt::Display for TlsUnsupportedError { +impl fmt::Display for NoTlsError { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.write_str("TLS was required but not supported by the server") + fmt.write_str("no TLS implementation configured") } } -impl Error for TlsUnsupportedError {} - -pub struct RequireTlsFuture { - f: Option>>, -} - -impl Future for RequireTlsFuture -where - T: Future, - T::Error: Into>, -{ - type Item = T::Item; - type Error = Box; - - fn poll(&mut self) -> Poll> { - match self.f.take().expect("future polled after completion") { - Ok(mut f) => match f.poll().map_err(Into::into)? { - Async::Ready(r) => Ok(Async::Ready(r)), - Async::NotReady => { - self.f = Some(Ok(f)); - Ok(Async::NotReady) - } - }, - Err(e) => Err(e), - } - } -} +impl Error for NoTlsError {} diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index c770c9f4d..b7001ddc5 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -12,7 +12,7 @@ use tokio::runtime::current_thread::Runtime; use tokio::timer::Delay; use tokio_postgres::error::SqlState; use tokio_postgres::types::{Kind, Type}; -use tokio_postgres::{AsyncMessage, Client, Connection, NoTls}; +use tokio_postgres::{AsyncMessage, Client, Connection, NoTls, NoTlsStream}; mod parse; #[cfg(feature = "runtime")] @@ -21,7 +21,8 @@ mod types; fn connect( s: &str, -) -> impl Future), Error = tokio_postgres::Error> { +) -> impl Future), Error = tokio_postgres::Error> +{ let builder = s.parse::().unwrap(); TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .map_err(|e| panic!("{}", e)) From e3f2eb72444e3b3fe75599bb660cb625cb9752df Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 13 Jan 2019 15:41:59 -0800 Subject: [PATCH 110/819] Update sync config for sslmode --- postgres/src/config.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index dab5f538e..49536cb0a 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -4,7 +4,7 @@ use log::error; use std::path::Path; use std::str::FromStr; use std::time::Duration; -use tokio_postgres::{Error, MakeTlsConnect, Socket, TargetSessionAttrs, TlsConnect}; +use tokio_postgres::{Error, MakeTlsConnect, Socket, SslMode, TargetSessionAttrs, TlsConnect}; use crate::{Client, RUNTIME}; @@ -50,6 +50,11 @@ impl Config { self } + pub fn ssl_mode(&mut self, ssl_mode: SslMode) -> &mut Config { + self.0.ssl_mode(ssl_mode); + self + } + pub fn host(&mut self, host: &str) -> &mut Config { self.0.host(host); self From 07078871d9c993478fe6f12030103afb3a2e4c58 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Jan 2019 21:44:59 -0800 Subject: [PATCH 111/819] Get rid of StringRow It's not exposed by any methods, and the API isn't great. 
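
For reference, a minimal sketch of how callers can read a column directly from the raw
DataRowBody now that StringRow is gone (the first_column_text helper below is hypothetical,
not part of the crate; it just mirrors the pattern this patch uses in connect_once.rs):

    use fallible_iterator::FallibleIterator;
    use postgres_protocol::message::backend::DataRowBody;

    // Hypothetical helper: pull the first column of a simple-query row out as text.
    fn first_column_text(row: &DataRowBody) -> Option<&str> {
        // `ranges()` yields one `Option<Range<usize>>` per column; `None` means SQL NULL.
        let range = match row.ranges().next() {
            Ok(Some(Some(range))) => range,
            _ => return None, // protocol error, zero columns, or NULL value
        };
        // The range indexes into the row's backing buffer.
        std::str::from_utf8(&row.buffer()[range]).ok()
    }

This is essentially what the target_session_attrs check in connect_once.rs now does inline.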
--- tokio-postgres/src/proto/connect_once.rs | 4 +- tokio-postgres/src/proto/simple_query.rs | 11 +++--- tokio-postgres/src/row.rs | 47 ------------------------ 3 files changed, 8 insertions(+), 54 deletions(-) diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 750c9ba26..197deea11 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -1,5 +1,6 @@ #![allow(clippy::large_enum_variant)] +use fallible_iterator::FallibleIterator; use futures::{try_ready, Async, Future, Poll, Stream}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; @@ -92,7 +93,8 @@ where match try_ready!(state.stream.poll()) { Some(row) => { - if row.get(0) == Some("on") { + let range = row.ranges().next().map_err(Error::parse)?.and_then(|r| r); + if range.map(|r| &row.buffer()[r]) == Some(b"on") { Err(Error::connect(io::Error::new( io::ErrorKind::PermissionDenied, "database does not allow writes", diff --git a/tokio-postgres/src/proto/simple_query.rs b/tokio-postgres/src/proto/simple_query.rs index 519bb0e12..9354f028f 100644 --- a/tokio-postgres/src/proto/simple_query.rs +++ b/tokio-postgres/src/proto/simple_query.rs @@ -1,10 +1,10 @@ use futures::sync::mpsc; use futures::{Async, Poll, Stream}; -use postgres_protocol::message::backend::Message; +use postgres_protocol::message::backend::{DataRowBody, Message}; use std::mem; use crate::proto::client::{Client, PendingRequest}; -use crate::{Error, StringRow}; +use crate::Error; pub enum State { Start { @@ -20,10 +20,10 @@ pub enum State { pub struct SimpleQueryStream(State); impl Stream for SimpleQueryStream { - type Item = StringRow; + type Item = DataRowBody; type Error = Error; - fn poll(&mut self) -> Poll, Error> { + fn poll(&mut self) -> Poll, Error> { loop { match mem::replace(&mut self.0, State::Done) { State::Start { client, request } => { @@ -48,8 +48,7 @@ impl Stream for SimpleQueryStream { } Some(Message::DataRow(body)) => { self.0 = State::ReadResponse { receiver }; - let row = StringRow::new(body)?; - return Ok(Async::Ready(Some(row))); + return Ok(Async::Ready(Some(body))); } Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), Some(Message::ReadyForQuery(_)) => return Ok(Async::Ready(None)), diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index b2f6b69ec..fb09f4916 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -134,50 +134,3 @@ impl Row { value.map(Some).map_err(Error::from_sql) } } - -pub struct StringRow { - body: DataRowBody, - ranges: Vec>>, -} - -impl StringRow { - #[allow(clippy::new_ret_no_self)] - pub(crate) fn new(body: DataRowBody) -> Result { - let ranges = body.ranges().collect().map_err(Error::parse)?; - Ok(StringRow { body, ranges }) - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub fn len(&self) -> usize { - self.ranges.len() - } - - pub fn get(&self, idx: usize) -> Option<&str> { - match self.try_get(idx) { - Ok(Some(ok)) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), - Ok(None) => panic!("no such column {}", idx), - } - } - - #[allow(clippy::option_option)] // FIXME - pub fn try_get(&self, idx: usize) -> Result>, Error> { - let buf = match self.ranges.get(idx) { - Some(range) => range.clone().map(|r| &self.body.buffer()[r]), - None => return Ok(None), - }; - - let v = match buf { - Some(buf) => { - let s = str::from_utf8(buf).map_err(|e| Error::from_sql(Box::new(e)))?; - Some(s) - } - None => None, - }; - 
- Ok(Some(v)) - } -} From 41243ae04f6aedb8a6818550668db1517d199c64 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Jan 2019 22:08:13 -0800 Subject: [PATCH 112/819] Little doc cleanup --- tokio-postgres/src/lib.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 0019337ea..d600b74a0 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -75,7 +75,18 @@ //! the connection to work concurrently when possible. //! //! Pipelining happens automatically when futures are polled concurrently (for example, by using the futures `join` -//! combinator). Say we want to prepare 2 statements. +//! combinator): +//! +//! ```rust +//! use futures::Future; +//! use tokio_postgres::{Client, Error, Statement}; +//! +//! fn pipelined_prepare(client: &mut Client) -> impl Future +//! { +//! client.prepare("SELECT * FROM foo") +//! .join(client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)")) +//! } +//! ``` //! //! # Runtime //! From 826788d7d9c1403c68bd4a0efdfdfab2e3f02ae9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 16 Jan 2019 18:30:04 -0800 Subject: [PATCH 113/819] Add a MakeTlsConnector for native_tls --- tokio-postgres-native-tls/Cargo.toml | 6 ++++- tokio-postgres-native-tls/src/lib.rs | 32 +++++++++++++++++++++++++- tokio-postgres-native-tls/src/test.rs | 33 ++++++++++++++++++++++++--- 3 files changed, 66 insertions(+), 5 deletions(-) diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml index 6ba2d66ef..9c9383483 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/tokio-postgres-native-tls/Cargo.toml @@ -4,11 +4,15 @@ version = "0.1.0" authors = ["Steven Fackler "] edition = "2018" +[features] +default = ["runtime"] +runtime = ["tokio-postgres/runtime"] + [dependencies] futures = "0.1" native-tls = "0.2" tokio-io = "0.1" -tokio-tls = "0.2" +tokio-tls = "0.2.1" tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } [dev-dependencies] diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index db4ecac62..56798a74a 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -2,19 +2,49 @@ use futures::{try_ready, Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; +#[cfg(feature = "runtime")] +use tokio_postgres::MakeTlsConnect; use tokio_postgres::{ChannelBinding, TlsConnect}; use tokio_tls::{Connect, TlsStream}; #[cfg(test)] mod test; +#[cfg(feature = "runtime")] +#[derive(Clone)] +pub struct MakeTlsConnector(tokio_tls::TlsConnector); + +#[cfg(feature = "runtime")] +impl MakeTlsConnector { + pub fn new(connector: native_tls::TlsConnector) -> MakeTlsConnector { + MakeTlsConnector(tokio_tls::TlsConnector::from(connector)) + } +} + +#[cfg(feature = "runtime")] +impl MakeTlsConnect for MakeTlsConnector +where + S: AsyncRead + AsyncWrite, +{ + type Stream = TlsStream; + type TlsConnect = TlsConnector; + type Error = native_tls::Error; + + fn make_tls_connect(&mut self, domain: &str) -> Result { + Ok(TlsConnector { + connector: self.0.clone(), + domain: domain.to_string(), + }) + } +} + pub struct TlsConnector { connector: tokio_tls::TlsConnector, domain: String, } impl TlsConnector { - pub fn with_connector(connector: native_tls::TlsConnector, domain: &str) -> TlsConnector { + pub fn new(connector: native_tls::TlsConnector, domain: &str) -> TlsConnector { TlsConnector { connector: 
tokio_tls::TlsConnector::from(connector), domain: domain.to_string(), diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index 284311813..39938f3c8 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -4,6 +4,8 @@ use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; use tokio_postgres::TlsConnect; +#[cfg(feature = "runtime")] +use crate::MakeTlsConnector; use crate::TlsConnector; fn smoke_test(s: &str, tls: T) @@ -45,7 +47,7 @@ fn require() { .unwrap(); smoke_test( "user=ssl_user dbname=postgres sslmode=require", - TlsConnector::with_connector(connector, "localhost"), + TlsConnector::new(connector, "localhost"), ); } @@ -59,7 +61,7 @@ fn prefer() { .unwrap(); smoke_test( "user=ssl_user dbname=postgres", - TlsConnector::with_connector(connector, "localhost"), + TlsConnector::new(connector, "localhost"), ); } @@ -73,6 +75,31 @@ fn scram_user() { .unwrap(); smoke_test( "user=scram_user password=password dbname=postgres sslmode=require", - TlsConnector::with_connector(connector, "localhost"), + TlsConnector::new(connector, "localhost"), ); } + +#[test] +#[cfg(feature = "runtime")] +fn runtime() { + let mut runtime = Runtime::new().unwrap(); + + let connector = native_tls::TlsConnector::builder() + .add_root_certificate( + Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), + ) + .build() + .unwrap(); + let connector = MakeTlsConnector::new(connector); + + let connect = tokio_postgres::connect( + "host=localhost port=5433 user=postgres sslmode=require", + connector, + ); + let (mut client, connection) = runtime.block_on(connect).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.spawn(connection); + + let execute = client.batch_execute("SELECT 1"); + runtime.block_on(execute).unwrap(); +} From 5e998f6fee90d1e7d38fa2787feacc84dc4b0f9d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 16 Jan 2019 18:32:26 -0800 Subject: [PATCH 114/819] Simplify native tls connector --- tokio-postgres-native-tls/src/lib.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index 56798a74a..6d31c4a7c 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -12,12 +12,12 @@ mod test; #[cfg(feature = "runtime")] #[derive(Clone)] -pub struct MakeTlsConnector(tokio_tls::TlsConnector); +pub struct MakeTlsConnector(native_tls::TlsConnector); #[cfg(feature = "runtime")] impl MakeTlsConnector { pub fn new(connector: native_tls::TlsConnector) -> MakeTlsConnector { - MakeTlsConnector(tokio_tls::TlsConnector::from(connector)) + MakeTlsConnector(connector) } } @@ -31,10 +31,7 @@ where type Error = native_tls::Error; fn make_tls_connect(&mut self, domain: &str) -> Result { - Ok(TlsConnector { - connector: self.0.clone(), - domain: domain.to_string(), - }) + Ok(TlsConnector::new(self.0.clone(), domain)) } } From b3fdf168e5717f28e7248925a92105085b01b61d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 17 Jan 2019 20:35:12 -0800 Subject: [PATCH 115/819] Add a warning about transaction semantics --- tokio-postgres/src/lib.rs | 21 ++++++++++++++++- tokio-postgres/tests/test/main.rs | 38 ++++++++++++++++--------------- 2 files changed, 40 insertions(+), 19 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index d600b74a0..9a18af49a 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs 
@@ -248,7 +248,25 @@ impl Client { BatchExecute(self.0.batch_execute(query)) } - pub fn transaction(&mut self) -> TransactionBuilder { + /// A utility method to wrap a future in a database transaction. + /// + /// The returned future will start a transaction and then run the provided future. If the future returns `Ok`, it + /// will commit the transaction, and if it returns `Err`, it will roll the transaction back. + /// + /// This is simply a convenience API; it's roughly equivalent to: + /// + /// ```ignore + /// client.batch_execute("BEGIN") + /// .and_then(your_future) + /// .and_then(client.batch_execute("COMMIT")) + /// .or_else(|e| client.batch_execute("ROLLBACK").then(|_| Err(e))) + /// ``` + /// + /// # Warning + /// + /// Unlike the other futures created by a client, this future is *not* atomic with respect to other requests. If you + /// attempt to execute it concurrently with other futures created by the same connection, they will interleave! + pub fn build_transaction(&mut self) -> TransactionBuilder { TransactionBuilder(self.0.clone()) } @@ -556,6 +574,7 @@ impl Stream for CopyOut { } } +/// A builder type which can wrap a future in a database transaction. pub struct TransactionBuilder(proto::Client); impl TransactionBuilder { diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index b7001ddc5..965d22f4b 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -478,7 +478,9 @@ fn transaction_commit() { .unwrap(); let f = client.batch_execute("INSERT INTO foo (name) VALUES ('steven')"); - runtime.block_on(client.transaction().build(f)).unwrap(); + runtime + .block_on(client.build_transaction().build(f)) + .unwrap(); let rows = runtime .block_on( @@ -514,7 +516,9 @@ fn transaction_abort() { .batch_execute("INSERT INTO foo (name) VALUES ('steven')") .map_err(|e| Box::new(e) as Box) .and_then(|_| Err::<(), _>(Box::::from(""))); - runtime.block_on(client.transaction().build(f)).unwrap_err(); + runtime + .block_on(client.build_transaction().build(f)) + .unwrap_err(); let rows = runtime .block_on( @@ -647,27 +651,25 @@ fn transaction_builder_around_moved_client() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); - let transaction_builder = client.transaction(); - let work = future::lazy(move || { - let execute = client.batch_execute( + let transaction_builder = client.build_transaction(); + let work = client + .batch_execute( "CREATE TEMPORARY TABLE transaction_foo ( - id SERIAL, - name TEXT - )", - ); - - execute.and_then(move |_| { + id SERIAL, + name TEXT + )", + ) + .and_then(move |_| { client .prepare("INSERT INTO transaction_foo (name) VALUES ($1), ($2)") .map(|statement| (client, statement)) }) - }) - .and_then(|(mut client, statement)| { - client - .query(&statement, &[&"jim", &"joe"]) - .collect() - .map(|_res| client) - }); + .and_then(|(mut client, statement)| { + client + .query(&statement, &[&"jim", &"joe"]) + .collect() + .map(|_res| client) + }); let transaction = transaction_builder.build(work); let mut client = runtime.block_on(transaction).unwrap(); From 0ca084d676f701608228f5c34e7cc9bc06e37374 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 17 Jan 2019 20:45:51 -0800 Subject: [PATCH 116/819] Add ordering note --- tokio-postgres/src/lib.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 9a18af49a..5c8779e9e 100644 --- a/tokio-postgres/src/lib.rs +++ 
b/tokio-postgres/src/lib.rs @@ -49,6 +49,12 @@ //! tokio::run(fut); //! ``` //! +//! # Behavior +//! +//! Calling a method like `Client::query` on its own does nothing. The associated request is not sent to the database +//! until the future returned by the method is first polled. Requests are executed in the order that they are first +//! polled, not in the order that their futures are created. +//! //! # Pipelining //! //! The client supports *pipelined* requests. Pipelining can improve performance in use cases in which multiple, From 725b7153b61aa0d2ada7e5517946c908cb0892fa Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 17 Jan 2019 21:11:24 -0800 Subject: [PATCH 117/819] Move futures/streams to a submodule --- postgres/src/copy_out_reader.rs | 5 +- postgres/src/query.rs | 5 +- postgres/src/query_portal.rs | 5 +- tokio-postgres/src/config.rs | 7 +- tokio-postgres/src/impls.rs | 178 +++++++++++++++++++++++++ tokio-postgres/src/lib.rs | 210 +++--------------------------- tokio-postgres/tests/test/main.rs | 3 +- 7 files changed, 215 insertions(+), 198 deletions(-) create mode 100644 tokio-postgres/src/impls.rs diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index 9cf8174b0..0bea15be1 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -2,10 +2,11 @@ use bytes::{Buf, Bytes}; use futures::stream::{self, Stream}; use std::io::{self, BufRead, Cursor, Read}; use std::marker::PhantomData; +use tokio_postgres::impls; use tokio_postgres::Error; pub struct CopyOutReader<'a> { - it: stream::Wait, + it: stream::Wait, cur: Cursor, _p: PhantomData<&'a mut ()>, } @@ -17,7 +18,7 @@ impl<'a> Drop for CopyOutReader<'a> { impl<'a> CopyOutReader<'a> { #[allow(clippy::new_ret_no_self)] - pub(crate) fn new(stream: tokio_postgres::CopyOut) -> Result, Error> { + pub(crate) fn new(stream: impls::CopyOut) -> Result, Error> { let mut it = stream.wait(); let cur = match it.next() { Some(Ok(cur)) => cur, diff --git a/postgres/src/query.rs b/postgres/src/query.rs index e7eb7948c..7376fa99f 100644 --- a/postgres/src/query.rs +++ b/postgres/src/query.rs @@ -1,10 +1,11 @@ use fallible_iterator::FallibleIterator; use futures::stream::{self, Stream}; use std::marker::PhantomData; +use tokio_postgres::impls; use tokio_postgres::{Error, Row}; pub struct Query<'a> { - it: stream::Wait, + it: stream::Wait, _p: PhantomData<&'a mut ()>, } @@ -14,7 +15,7 @@ impl<'a> Drop for Query<'a> { } impl<'a> Query<'a> { - pub(crate) fn new(stream: tokio_postgres::Query) -> Query<'a> { + pub(crate) fn new(stream: impls::Query) -> Query<'a> { Query { it: stream.wait(), _p: PhantomData, diff --git a/postgres/src/query_portal.rs b/postgres/src/query_portal.rs index 0ed8250a0..d2ee40679 100644 --- a/postgres/src/query_portal.rs +++ b/postgres/src/query_portal.rs @@ -1,10 +1,11 @@ use fallible_iterator::FallibleIterator; use futures::stream::{self, Stream}; use std::marker::PhantomData; +use tokio_postgres::impls; use tokio_postgres::{Error, Row}; pub struct QueryPortal<'a> { - it: stream::Wait, + it: stream::Wait, _p: PhantomData<&'a mut ()>, } @@ -14,7 +15,7 @@ impl<'a> Drop for QueryPortal<'a> { } impl<'a> QueryPortal<'a> { - pub(crate) fn new(stream: tokio_postgres::QueryPortal) -> QueryPortal<'a> { + pub(crate) fn new(stream: impls::QueryPortal) -> QueryPortal<'a> { QueryPortal { it: stream.wait(), _p: PhantomData, diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index b38c1a0c8..5ce8d8807 100644 --- a/tokio-postgres/src/config.rs +++ 
b/tokio-postgres/src/config.rs @@ -15,12 +15,15 @@ use std::sync::Arc; use std::time::Duration; use tokio_io::{AsyncRead, AsyncWrite}; +#[cfg(feature = "runtime")] +use crate::impls::Connect; +use crate::impls::ConnectRaw; #[cfg(feature = "runtime")] use crate::proto::ConnectFuture; use crate::proto::ConnectRawFuture; +use crate::{Error, TlsConnect}; #[cfg(feature = "runtime")] -use crate::{Connect, MakeTlsConnect, Socket}; -use crate::{ConnectRaw, Error, TlsConnect}; +use crate::{MakeTlsConnect, Socket}; /// Properties required of a session. #[cfg(feature = "runtime")] diff --git a/tokio-postgres/src/impls.rs b/tokio-postgres/src/impls.rs new file mode 100644 index 000000000..9db60cde6 --- /dev/null +++ b/tokio-postgres/src/impls.rs @@ -0,0 +1,178 @@ +use bytes::{Bytes, IntoBuf}; +use futures::{try_ready, Async, Future, Poll, Stream}; +use std::error; +use tokio_io::{AsyncRead, AsyncWrite}; + +use crate::proto; +use crate::{Client, Connection, Error, Portal, Row, Statement, TlsConnect}; +#[cfg(feature = "runtime")] +use crate::{MakeTlsConnect, Socket}; + +#[must_use = "futures do nothing unless polled"] +pub struct CancelQueryRaw(pub(crate) proto::CancelQueryRawFuture) +where + S: AsyncRead + AsyncWrite, + T: TlsConnect; + +impl Future for CancelQueryRaw +where + S: AsyncRead + AsyncWrite, + T: TlsConnect, +{ + type Item = (); + type Error = Error; + + fn poll(&mut self) -> Poll<(), Error> { + self.0.poll() + } +} + +#[cfg(feature = "runtime")] +#[must_use = "futures do nothing unless polled"] +pub struct CancelQuery(pub(crate) proto::CancelQueryFuture) +where + T: MakeTlsConnect; + +#[cfg(feature = "runtime")] +impl Future for CancelQuery +where + T: MakeTlsConnect, +{ + type Item = (); + type Error = Error; + + fn poll(&mut self) -> Poll<(), Error> { + self.0.poll() + } +} + +#[must_use = "futures do nothing unless polled"] +pub struct ConnectRaw(pub(crate) proto::ConnectRawFuture) +where + S: AsyncRead + AsyncWrite, + T: TlsConnect; + +impl Future for ConnectRaw +where + S: AsyncRead + AsyncWrite, + T: TlsConnect, +{ + type Item = (Client, Connection); + type Error = Error; + + fn poll(&mut self) -> Poll<(Client, Connection), Error> { + let (client, connection) = try_ready!(self.0.poll()); + + Ok(Async::Ready((Client(client), Connection(connection)))) + } +} + +#[cfg(feature = "runtime")] +#[must_use = "futures do nothing unless polled"] +pub struct Connect(pub(crate) proto::ConnectFuture) +where + T: MakeTlsConnect; + +#[cfg(feature = "runtime")] +impl Future for Connect +where + T: MakeTlsConnect, +{ + type Item = (Client, Connection); + type Error = Error; + + fn poll(&mut self) -> Poll<(Client, Connection), Error> { + let (client, connection) = try_ready!(self.0.poll()); + + Ok(Async::Ready((Client(client), Connection(connection)))) + } +} + +#[must_use = "futures do nothing unless polled"] +pub struct Prepare(pub(crate) proto::PrepareFuture); + +impl Future for Prepare { + type Item = Statement; + type Error = Error; + + fn poll(&mut self) -> Poll { + let statement = try_ready!(self.0.poll()); + + Ok(Async::Ready(Statement(statement))) + } +} + +#[must_use = "streams do nothing unless polled"] +pub struct Query(pub(crate) proto::QueryStream); + +impl Stream for Query { + type Item = Row; + type Error = Error; + + fn poll(&mut self) -> Poll, Error> { + self.0.poll() + } +} + +#[must_use = "futures do nothing unless polled"] +pub struct Bind(pub(crate) proto::BindFuture); + +impl Future for Bind { + type Item = Portal; + type Error = Error; + + fn poll(&mut self) -> Poll { + match 
self.0.poll() { + Ok(Async::Ready(portal)) => Ok(Async::Ready(Portal(portal))), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(e) => Err(e), + } + } +} + +#[must_use = "streams do nothing unless polled"] +pub struct QueryPortal(pub(crate) proto::QueryStream); + +impl Stream for QueryPortal { + type Item = Row; + type Error = Error; + + fn poll(&mut self) -> Poll, Error> { + self.0.poll() + } +} + +#[must_use = "futures do nothing unless polled"] +pub struct CopyIn(pub(crate) proto::CopyInFuture) +where + S: Stream, + S::Item: IntoBuf, + ::Buf: Send, + S::Error: Into>; + +impl Future for CopyIn +where + S: Stream, + S::Item: IntoBuf, + ::Buf: Send, + S::Error: Into>, +{ + type Item = u64; + type Error = Error; + + fn poll(&mut self) -> Poll { + self.0.poll() + } +} + +#[must_use = "streams do nothing unless polled"] +pub struct CopyOut(pub(crate) proto::CopyOutStream); + +impl Stream for CopyOut { + type Item = Bytes; + type Error = Error; + + fn poll(&mut self) -> Poll, Error> { + self.0.poll() + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 5c8779e9e..095be98f5 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -101,7 +101,7 @@ //! all dependence on the tokio runtime is removed. #![warn(rust_2018_idioms, clippy::all)] -use bytes::{Bytes, IntoBuf}; +use bytes::IntoBuf; use futures::{try_ready, Async, Future, Poll, Stream}; use std::error::Error as StdError; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -118,6 +118,7 @@ use crate::types::{ToSql, Type}; mod config; pub mod error; +pub mod impls; mod proto; mod row; #[cfg(feature = "runtime")] @@ -144,11 +145,11 @@ fn next_portal() -> String { /// /// [`Config`]: ./Config.t.html #[cfg(feature = "runtime")] -pub fn connect(config: &str, tls: T) -> Connect +pub fn connect(config: &str, tls: T) -> impls::Connect where T: MakeTlsConnect, { - Connect(proto::ConnectFuture::new(tls, config.parse())) + impls::Connect(proto::ConnectFuture::new(tls, config.parse())) } /// An asynchronous PostgreSQL client. @@ -162,7 +163,7 @@ impl Client { /// /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), /// which are set when executed. Prepared statements can only be used with the connection that created them. - pub fn prepare(&mut self, query: &str) -> Prepare { + pub fn prepare(&mut self, query: &str) -> impls::Prepare { self.prepare_typed(query, &[]) } @@ -170,8 +171,8 @@ impl Client { /// /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be /// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`. - pub fn prepare_typed(&mut self, query: &str, param_types: &[Type]) -> Prepare { - Prepare(self.0.prepare(next_statement(), query, param_types)) + pub fn prepare_typed(&mut self, query: &str, param_types: &[Type]) -> impls::Prepare { + impls::Prepare(self.0.prepare(next_statement(), query, param_types)) } /// Executes a statement, returning the number of rows modified. @@ -190,8 +191,8 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. 
- pub fn query(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Query { - Query(self.0.query(&statement.0, params)) + pub fn query(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Query { + impls::Query(self.0.query(&statement.0, params)) } /// Binds a statement to a set of parameters, creating a `Portal` which can be incrementally queried. @@ -202,16 +203,16 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn bind(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Bind { - Bind(self.0.bind(&statement.0, next_portal(), params)) + pub fn bind(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Bind { + impls::Bind(self.0.bind(&statement.0, next_portal(), params)) } /// Continues execution of a portal, returning a stream of the resulting rows. /// /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// query_portal. If the requested number is negative or 0, all rows will be returned. - pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> QueryPortal { - QueryPortal(self.0.query_portal(&portal.0, max_rows)) + pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> impls::QueryPortal { + impls::QueryPortal(self.0.query_portal(&portal.0, max_rows)) } /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. @@ -223,7 +224,7 @@ impl Client { statement: &Statement, params: &[&dyn ToSql], stream: S, - ) -> CopyIn + ) -> impls::CopyIn where S: Stream, S::Item: IntoBuf, @@ -231,12 +232,12 @@ impl Client { // FIXME error type? S::Error: Into>, { - CopyIn(self.0.copy_in(&statement.0, params, stream)) + impls::CopyIn(self.0.copy_in(&statement.0, params, stream)) } /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. - pub fn copy_out(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> CopyOut { - CopyOut(self.0.copy_out(&statement.0, params)) + pub fn copy_out(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::CopyOut { + impls::CopyOut(self.0.copy_out(&statement.0, params)) } /// Executes a sequence of SQL statements. @@ -283,21 +284,21 @@ impl Client { /// /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] - pub fn cancel_query(&mut self, make_tls_mode: T) -> CancelQuery + pub fn cancel_query(&mut self, make_tls_mode: T) -> impls::CancelQuery where T: MakeTlsConnect, { - CancelQuery(self.0.cancel_query(make_tls_mode)) + impls::CancelQuery(self.0.cancel_query(make_tls_mode)) } /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new /// connection itself. - pub fn cancel_query_raw(&mut self, stream: S, tls_mode: T) -> CancelQueryRaw + pub fn cancel_query_raw(&mut self, stream: S, tls_mode: T) -> impls::CancelQueryRaw where S: AsyncRead + AsyncWrite, T: TlsConnect, { - CancelQueryRaw(self.0.cancel_query_raw(stream, tls_mode)) + impls::CancelQueryRaw(self.0.cancel_query_raw(stream, tls_mode)) } /// Determines if the connection to the server has already closed. 
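Callers that name these future and stream types now spell them through the new module. A small sketch of the caller-side change (the `PendingPrepare` type here is hypothetical, mirroring the `poll_idle_new` test below):

```rust
use tokio_postgres::impls;

// A caller that holds on to an un-polled future now names its type via the `impls` module.
struct PendingPrepare {
    client: tokio_postgres::Client,
    prepare: Option<impls::Prepare>,
}

impl PendingPrepare {
    fn start(mut client: tokio_postgres::Client) -> PendingPrepare {
        let prepare = client.prepare("SELECT 1");
        PendingPrepare {
            client,
            prepare: Some(prepare),
        }
    }
}
```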
@@ -375,100 +376,6 @@ pub enum AsyncMessage { __NonExhaustive, } -#[must_use = "futures do nothing unless polled"] -pub struct CancelQueryRaw(proto::CancelQueryRawFuture) -where - S: AsyncRead + AsyncWrite, - T: TlsConnect; - -impl Future for CancelQueryRaw -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll<(), Error> { - self.0.poll() - } -} - -#[cfg(feature = "runtime")] -#[must_use = "futures do nothing unless polled"] -pub struct CancelQuery(proto::CancelQueryFuture) -where - T: MakeTlsConnect; - -#[cfg(feature = "runtime")] -impl Future for CancelQuery -where - T: MakeTlsConnect, -{ - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll<(), Error> { - self.0.poll() - } -} - -#[must_use = "futures do nothing unless polled"] -pub struct ConnectRaw(proto::ConnectRawFuture) -where - S: AsyncRead + AsyncWrite, - T: TlsConnect; - -impl Future for ConnectRaw -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - type Item = (Client, Connection); - type Error = Error; - - fn poll(&mut self) -> Poll<(Client, Connection), Error> { - let (client, connection) = try_ready!(self.0.poll()); - - Ok(Async::Ready((Client(client), Connection(connection)))) - } -} - -#[cfg(feature = "runtime")] -#[must_use = "futures do nothing unless polled"] -pub struct Connect(proto::ConnectFuture) -where - T: MakeTlsConnect; - -#[cfg(feature = "runtime")] -impl Future for Connect -where - T: MakeTlsConnect, -{ - type Item = (Client, Connection); - type Error = Error; - - fn poll(&mut self) -> Poll<(Client, Connection), Error> { - let (client, connection) = try_ready!(self.0.poll()); - - Ok(Async::Ready((Client(client), Connection(connection)))) - } -} - -#[must_use = "futures do nothing unless polled"] -pub struct Prepare(proto::PrepareFuture); - -impl Future for Prepare { - type Item = Statement; - type Error = Error; - - fn poll(&mut self) -> Poll { - let statement = try_ready!(self.0.poll()); - - Ok(Async::Ready(Statement(statement))) - } -} - /// A prepared statement. /// /// Prepared statements can only be used with the connection that created them. @@ -499,87 +406,12 @@ impl Future for Execute { } } -#[must_use = "streams do nothing unless polled"] -pub struct Query(proto::QueryStream); - -impl Stream for Query { - type Item = Row; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - self.0.poll() - } -} - -#[must_use = "futures do nothing unless polled"] -pub struct Bind(proto::BindFuture); - -impl Future for Bind { - type Item = Portal; - type Error = Error; - - fn poll(&mut self) -> Poll { - match self.0.poll() { - Ok(Async::Ready(portal)) => Ok(Async::Ready(Portal(portal))), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(e) => Err(e), - } - } -} - -#[must_use = "streams do nothing unless polled"] -pub struct QueryPortal(proto::QueryStream); - -impl Stream for QueryPortal { - type Item = Row; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - self.0.poll() - } -} - /// A portal. /// /// Portals can only be used with the connection that created them, and only exist for the duration of the transaction /// in which they were created. 
pub struct Portal(proto::Portal); -#[must_use = "futures do nothing unless polled"] -pub struct CopyIn(proto::CopyInFuture) -where - S: Stream, - S::Item: IntoBuf, - ::Buf: Send, - S::Error: Into>; - -impl Future for CopyIn -where - S: Stream, - S::Item: IntoBuf, - ::Buf: Send, - S::Error: Into>, -{ - type Item = u64; - type Error = Error; - - fn poll(&mut self) -> Poll { - self.0.poll() - } -} - -#[must_use = "streams do nothing unless polled"] -pub struct CopyOut(proto::CopyOutStream); - -impl Stream for CopyOut { - type Item = Bytes; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - self.0.poll() - } -} - /// A builder type which can wrap a future in a database transaction. pub struct TransactionBuilder(proto::Client); diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 965d22f4b..cf10269ab 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -11,6 +11,7 @@ use tokio::prelude::*; use tokio::runtime::current_thread::Runtime; use tokio::timer::Delay; use tokio_postgres::error::SqlState; +use tokio_postgres::impls; use tokio_postgres::types::{Kind, Type}; use tokio_postgres::{AsyncMessage, Client, Connection, NoTls, NoTlsStream}; @@ -745,7 +746,7 @@ fn poll_idle_running() { fn poll_idle_new() { struct IdleFuture { client: tokio_postgres::Client, - prepare: Option, + prepare: Option, } impl Future for IdleFuture { From 9e06d6b35e0a8401d430ef3de31eb5561c1b8099 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 17 Jan 2019 21:16:50 -0800 Subject: [PATCH 118/819] Document impls module --- tokio-postgres/src/impls.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tokio-postgres/src/impls.rs b/tokio-postgres/src/impls.rs index 9db60cde6..55ea60ca4 100644 --- a/tokio-postgres/src/impls.rs +++ b/tokio-postgres/src/impls.rs @@ -1,3 +1,4 @@ +//! Futures and stream types used in the crate. use bytes::{Bytes, IntoBuf}; use futures::{try_ready, Async, Future, Poll, Stream}; use std::error; @@ -8,6 +9,7 @@ use crate::{Client, Connection, Error, Portal, Row, Statement, TlsConnect}; #[cfg(feature = "runtime")] use crate::{MakeTlsConnect, Socket}; +/// The future returned by `Client::cancel_query_raw`. #[must_use = "futures do nothing unless polled"] pub struct CancelQueryRaw(pub(crate) proto::CancelQueryRawFuture) where @@ -27,6 +29,7 @@ where } } +/// The future returned by `Client::cancel_query`. #[cfg(feature = "runtime")] #[must_use = "futures do nothing unless polled"] pub struct CancelQuery(pub(crate) proto::CancelQueryFuture) @@ -46,6 +49,7 @@ where } } +/// The future returned by `Config::connect_raw`. #[must_use = "futures do nothing unless polled"] pub struct ConnectRaw(pub(crate) proto::ConnectRawFuture) where @@ -67,6 +71,7 @@ where } } +/// The future returned by `Config::connect`. #[cfg(feature = "runtime")] #[must_use = "futures do nothing unless polled"] pub struct Connect(pub(crate) proto::ConnectFuture) @@ -88,6 +93,7 @@ where } } +/// The future returned by `Client::prepare`. #[must_use = "futures do nothing unless polled"] pub struct Prepare(pub(crate) proto::PrepareFuture); @@ -102,6 +108,7 @@ impl Future for Prepare { } } +/// The future returned by `Client::query`. #[must_use = "streams do nothing unless polled"] pub struct Query(pub(crate) proto::QueryStream); @@ -114,6 +121,7 @@ impl Stream for Query { } } +/// The future returned by `Client::bind`. 
#[must_use = "futures do nothing unless polled"] pub struct Bind(pub(crate) proto::BindFuture); @@ -130,6 +138,7 @@ impl Future for Bind { } } +/// The future returned by `Client::query_portal`. #[must_use = "streams do nothing unless polled"] pub struct QueryPortal(pub(crate) proto::QueryStream); @@ -142,6 +151,7 @@ impl Stream for QueryPortal { } } +/// The future returned by `Client::copy_in`. #[must_use = "futures do nothing unless polled"] pub struct CopyIn(pub(crate) proto::CopyInFuture) where @@ -165,6 +175,7 @@ where } } +/// The future returned by `Client::copy_out`. #[must_use = "streams do nothing unless polled"] pub struct CopyOut(pub(crate) proto::CopyOutStream); From d2723f53829972ff187d3f87584c626b00a59dfe Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 18 Jan 2019 21:29:13 -0800 Subject: [PATCH 119/819] Don't conditionally compile config bits --- tokio-postgres/src/config.rs | 52 +++--------------------------- tokio-postgres/tests/test/parse.rs | 7 +--- 2 files changed, 6 insertions(+), 53 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 5ce8d8807..eb4b97e2d 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1,17 +1,16 @@ use std::borrow::Cow; use std::error; -#[cfg(all(feature = "runtime", unix))] +#[cfg(unix)] use std::ffi::OsStr; use std::fmt; use std::iter; use std::mem; -#[cfg(all(feature = "runtime", unix))] +#[cfg(unix)] use std::os::unix::ffi::OsStrExt; -#[cfg(all(feature = "runtime", unix))] +#[cfg(unix)] use std::path::{Path, PathBuf}; use std::str::{self, FromStr}; use std::sync::Arc; -#[cfg(feature = "runtime")] use std::time::Duration; use tokio_io::{AsyncRead, AsyncWrite}; @@ -26,7 +25,6 @@ use crate::{Error, TlsConnect}; use crate::{MakeTlsConnect, Socket}; /// Properties required of a session. -#[cfg(feature = "runtime")] #[derive(Debug, Copy, Clone, PartialEq)] pub enum TargetSessionAttrs { /// No special properties are required. @@ -48,7 +46,6 @@ pub enum SslMode { Require, } -#[cfg(feature = "runtime")] #[derive(Debug, Clone, PartialEq)] pub(crate) enum Host { Tcp(String), @@ -64,17 +61,11 @@ pub(crate) struct Inner { pub(crate) options: Option, pub(crate) application_name: Option, pub(crate) ssl_mode: SslMode, - #[cfg(feature = "runtime")] pub(crate) host: Vec, - #[cfg(feature = "runtime")] pub(crate) port: Vec, - #[cfg(feature = "runtime")] pub(crate) connect_timeout: Option, - #[cfg(feature = "runtime")] pub(crate) keepalives: bool, - #[cfg(feature = "runtime")] pub(crate) keepalives_idle: Duration, - #[cfg(feature = "runtime")] pub(crate) target_session_attrs: TargetSessionAttrs, } @@ -170,17 +161,11 @@ impl Config { options: None, application_name: None, ssl_mode: SslMode::Prefer, - #[cfg(feature = "runtime")] host: vec![], - #[cfg(feature = "runtime")] port: vec![], - #[cfg(feature = "runtime")] connect_timeout: None, - #[cfg(feature = "runtime")] keepalives: true, - #[cfg(feature = "runtime")] keepalives_idle: Duration::from_secs(2 * 60 * 60), - #[cfg(feature = "runtime")] target_session_attrs: TargetSessionAttrs::Any, })) } @@ -234,9 +219,6 @@ impl Config { /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. - /// - /// Requires the `runtime` Cargo feature (enabled by default). 
- #[cfg(feature = "runtime")] pub fn host(&mut self, host: &str) -> &mut Config { #[cfg(unix)] { @@ -254,9 +236,6 @@ impl Config { /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. - /// - /// Requires the `runtime` Cargo feature (enabled by default) and a Unix target. - #[cfg(all(feature = "runtime", unix))] pub fn host_path(&mut self, host: T) -> &mut Config where T: AsRef, @@ -272,9 +251,6 @@ impl Config { /// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which /// case the default of 5432 is used, a single port, in which it is used for all hosts, or the same number of ports /// as hosts. - /// - /// Requires the `runtime` Cargo feature (enabled by default). - #[cfg(feature = "runtime")] pub fn port(&mut self, port: u16) -> &mut Config { Arc::make_mut(&mut self.0).port.push(port); self @@ -284,9 +260,6 @@ impl Config { /// /// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each /// host separately. Defaults to no limit. - /// - /// Requires the `runtime` Cargo feature (enabled by default). - #[cfg(feature = "runtime")] pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { Arc::make_mut(&mut self.0).connect_timeout = Some(connect_timeout); self @@ -295,9 +268,6 @@ impl Config { /// Controls the use of TCP keepalive. /// /// This is ignored for Unix domain socket connections. Defaults to `true`. - /// - /// Requires the `runtime` Cargo feature (enabled by default). - #[cfg(feature = "runtime")] pub fn keepalives(&mut self, keepalives: bool) -> &mut Config { Arc::make_mut(&mut self.0).keepalives = keepalives; self @@ -306,9 +276,6 @@ impl Config { /// Sets the amount of idle time before a keepalive packet is sent on the connection. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. - /// - /// Requires the `runtime` Cargo feature (enabled by default). - #[cfg(feature = "runtime")] pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { Arc::make_mut(&mut self.0).keepalives_idle = keepalives_idle; self @@ -318,9 +285,6 @@ impl Config { /// /// This can be used to connect to the primary server in a clustered database rather than one of the read-only /// secondary servers. Defaults to `Any`. - /// - /// Requires the `runtime` Cargo feature (enabled by default). 
- #[cfg(feature = "runtime")] pub fn target_session_attrs( &mut self, target_session_attrs: TargetSessionAttrs, @@ -355,13 +319,11 @@ impl Config { }; self.ssl_mode(mode); } - #[cfg(feature = "runtime")] "host" => { for host in value.split(',') { self.host(host); } } - #[cfg(feature = "runtime")] "port" => { for port in value.split(',') { let port = if port.is_empty() { @@ -373,7 +335,6 @@ impl Config { self.port(port); } } - #[cfg(feature = "runtime")] "connect_timeout" => { let timeout = value .parse::() @@ -382,14 +343,12 @@ impl Config { self.connect_timeout(Duration::from_secs(timeout as u64)); } } - #[cfg(feature = "runtime")] "keepalives" => { let keepalives = value .parse::() .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives"))))?; self.keepalives(keepalives != 0); } - #[cfg(feature = "runtime")] "keepalives_idle" => { let keepalives_idle = value .parse::() @@ -398,7 +357,6 @@ impl Config { self.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); } } - #[cfg(feature = "runtime")] "target_session_attrs" => { let target_session_attrs = match &*value { "any" => TargetSessionAttrs::Any, @@ -800,7 +758,7 @@ impl<'a> UrlParser<'a> { Ok(()) } - #[cfg(all(feature = "runtime", unix))] + #[cfg(unix)] fn host_param(&mut self, s: &str) -> Result<(), Error> { let decoded = Cow::from(percent_encoding::percent_decode(s.as_bytes())); if decoded.get(0) == Some(&b'/') { @@ -813,7 +771,7 @@ impl<'a> UrlParser<'a> { Ok(()) } - #[cfg(not(all(feature = "runtime", unix)))] + #[cfg(not(unix))] fn host_param(&mut self, s: &str) -> Result<(), Error> { let s = self.decode(s)?; self.config.param("host", &s) diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index 0e94e33c8..ed3b59824 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,8 +1,5 @@ -#[cfg(feature = "runtime")] use std::time::Duration; -use tokio_postgres::Config; -#[cfg(feature = "runtime")] -use tokio_postgres::TargetSessionAttrs; +use tokio_postgres::{Config, TargetSessionAttrs}; fn check(s: &str, config: &Config) { assert_eq!(s.parse::().expect(s), *config, "`{}`", s); @@ -28,7 +25,6 @@ fn pairs_ws() { } #[test] -#[cfg(feature = "runtime")] fn settings() { check( "connect_timeout=3 keepalives=0 keepalives_idle=30 target_session_attrs=read-write", @@ -41,7 +37,6 @@ fn settings() { } #[test] -#[cfg(feature = "runtime")] fn url() { check("postgresql://", &Config::new()); check( From e3a25ad6c864070a1ce9f8086cb8518a2fddca6f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Jan 2019 13:38:51 -0800 Subject: [PATCH 120/819] Skip DNS lookup for IP addrs --- tokio-postgres/src/proto/connect_socket.rs | 36 ++++++++++++++++------ 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/tokio-postgres/src/proto/connect_socket.rs b/tokio-postgres/src/proto/connect_socket.rs index 65cbe8955..7b7e5cb88 100644 --- a/tokio-postgres/src/proto/connect_socket.rs +++ b/tokio-postgres/src/proto/connect_socket.rs @@ -3,7 +3,7 @@ use futures_cpupool::{CpuFuture, CpuPool}; use lazy_static::lazy_static; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; -use std::net::{SocketAddr, ToSocketAddrs}; +use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; use std::time::Instant; use std::vec; use tokio_tcp::TcpStream; @@ -23,8 +23,14 @@ lazy_static! 
{ #[derive(StateMachineFuture)] pub enum ConnectSocket { #[state_machine_future(start)] - #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))] - #[cfg_attr(not(unix), state_machine_future(transitions(ResolvingDns)))] + #[cfg_attr( + unix, + state_machine_future(transitions(ConnectingUnix, ConnectingTcp, ResolvingDns)) + )] + #[cfg_attr( + not(unix), + state_machine_future(transitions(ConnectingTcp, ResolvingDns)) + )] Start { config: Config, idx: usize }, #[cfg(unix)] #[state_machine_future(transitions(Finished))] @@ -63,13 +69,25 @@ impl PollConnectSocket for ConnectSocket { .unwrap_or(&5432); match &state.config.0.host[state.idx] { - Host::Tcp(host) => transition!(ResolvingDns { - future: DNS_POOL.spawn_fn({ - let host = host.clone(); - move || (&*host, port).to_socket_addrs() + Host::Tcp(host) => match host.parse::() { + Ok(addr) => transition!(ConnectingTcp { + future: TcpStream::connect(&SocketAddr::new(addr, port)), + timeout: state + .config + .0 + .connect_timeout + .map(|d| Delay::new(Instant::now() + d)), + addrs: vec![].into_iter(), + config: state.config, }), - config: state.config, - }), + Err(_) => transition!(ResolvingDns { + future: DNS_POOL.spawn_fn({ + let host = host.clone(); + move || (&*host, port).to_socket_addrs() + }), + config: state.config, + }), + }, #[cfg(unix)] Host::Unix(host) => { let path = host.join(format!(".s.PGSQL.{}", port)); From df84dd8fd0558827b545d7eaac92b57320d5408f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 29 Jan 2019 19:29:33 -0800 Subject: [PATCH 121/819] Add convenience to_vec methods to fallible iterators The very common case is to simply collect these to a vector, and this lets people avoid having to import FallibleIterator. --- postgres/src/query.rs | 5 +++++ postgres/src/query_portal.rs | 5 +++++ postgres/src/test.rs | 25 ++++++++++--------------- 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/postgres/src/query.rs b/postgres/src/query.rs index 7376fa99f..99332e538 100644 --- a/postgres/src/query.rs +++ b/postgres/src/query.rs @@ -21,6 +21,11 @@ impl<'a> Query<'a> { _p: PhantomData, } } + + /// A convenience API which collects the resulting rows into a `Vec` and returns them. + pub fn to_vec(self) -> Result, Error> { + self.collect() + } } impl<'a> FallibleIterator for Query<'a> { diff --git a/postgres/src/query_portal.rs b/postgres/src/query_portal.rs index d2ee40679..75f380c2c 100644 --- a/postgres/src/query_portal.rs +++ b/postgres/src/query_portal.rs @@ -21,6 +21,11 @@ impl<'a> QueryPortal<'a> { _p: PhantomData, } } + + /// A convenience API which collects the resulting rows into a `Vec` and returns them. 
+ pub fn to_vec(self) -> Result, Error> { + self.collect() + } } impl<'a> FallibleIterator for QueryPortal<'a> { diff --git a/postgres/src/test.rs b/postgres/src/test.rs index ed6242a17..89e71bc37 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,4 +1,3 @@ -use fallible_iterator::FallibleIterator; use std::io::Read; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -21,11 +20,7 @@ fn query_prepared() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); let stmt = client.prepare("SELECT $1::TEXT").unwrap(); - let rows = client - .query(&stmt, &[&"hello"]) - .unwrap() - .collect::>() - .unwrap(); + let rows = client.query(&stmt, &[&"hello"]).unwrap().to_vec().unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "hello"); } @@ -37,7 +32,7 @@ fn query_unprepared() { let rows = client .query("SELECT $1::TEXT", &[&"hello"]) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "hello"); @@ -62,7 +57,7 @@ fn transaction_commit() { let rows = client .query("SELECT * FROM foo", &[]) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -87,7 +82,7 @@ fn transaction_rollback() { let rows = client .query("SELECT * FROM foo", &[]) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 0); } @@ -111,7 +106,7 @@ fn transaction_drop() { let rows = client .query("SELECT * FROM foo", &[]) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 0); } @@ -141,7 +136,7 @@ fn nested_transactions() { let rows = transaction .query("SELECT id FROM foo ORDER BY id", &[]) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -165,7 +160,7 @@ fn nested_transactions() { let rows = client .query("SELECT id FROM foo ORDER BY id", &[]) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 3); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -192,7 +187,7 @@ fn copy_in() { let rows = client .query("SELECT id, name FROM foo ORDER BY id", &[]) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 2); @@ -251,7 +246,7 @@ fn portal() { let rows = transaction .query_portal(&portal, 2) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -260,7 +255,7 @@ fn portal() { let rows = transaction .query_portal(&portal, 2) .unwrap() - .collect::>() + .to_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 3); From e57a2976e951ba4bfb47e41683bebd6df87fb314 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 29 Jan 2019 19:40:49 -0800 Subject: [PATCH 122/819] Fix clippy --- postgres/src/query.rs | 2 +- postgres/src/query_portal.rs | 2 +- postgres/src/test.rs | 24 ++++++++++++++---------- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/postgres/src/query.rs b/postgres/src/query.rs index 99332e538..4b2ae0b7b 100644 --- a/postgres/src/query.rs +++ b/postgres/src/query.rs @@ -23,7 +23,7 @@ impl<'a> Query<'a> { } /// A convenience API which collects the resulting rows into a `Vec` and returns them. 
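With the rename applied below, caller code collects rows directly and no longer needs to import `FallibleIterator` just for `collect`. A short sketch against the synchronous `postgres` client, assuming the usual local test server:

```rust
use postgres::Client;
use tokio_postgres::NoTls;

fn main() {
    let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap();
    let stmt = client.prepare("SELECT $1::TEXT").unwrap();
    // No explicit collect::<Vec<_>>() needed any more.
    let rows = client.query(&stmt, &[&"hello"]).unwrap().into_vec().unwrap();
    assert_eq!(rows[0].get::<_, &str>(0), "hello");
}
```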
- pub fn to_vec(self) -> Result, Error> { + pub fn into_vec(self) -> Result, Error> { self.collect() } } diff --git a/postgres/src/query_portal.rs b/postgres/src/query_portal.rs index 75f380c2c..824facc1c 100644 --- a/postgres/src/query_portal.rs +++ b/postgres/src/query_portal.rs @@ -23,7 +23,7 @@ impl<'a> QueryPortal<'a> { } /// A convenience API which collects the resulting rows into a `Vec` and returns them. - pub fn to_vec(self) -> Result, Error> { + pub fn into_vec(self) -> Result, Error> { self.collect() } } diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 89e71bc37..f9d7ec9b8 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -20,7 +20,11 @@ fn query_prepared() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); let stmt = client.prepare("SELECT $1::TEXT").unwrap(); - let rows = client.query(&stmt, &[&"hello"]).unwrap().to_vec().unwrap(); + let rows = client + .query(&stmt, &[&"hello"]) + .unwrap() + .into_vec() + .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "hello"); } @@ -32,7 +36,7 @@ fn query_unprepared() { let rows = client .query("SELECT $1::TEXT", &[&"hello"]) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "hello"); @@ -57,7 +61,7 @@ fn transaction_commit() { let rows = client .query("SELECT * FROM foo", &[]) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -82,7 +86,7 @@ fn transaction_rollback() { let rows = client .query("SELECT * FROM foo", &[]) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 0); } @@ -106,7 +110,7 @@ fn transaction_drop() { let rows = client .query("SELECT * FROM foo", &[]) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 0); } @@ -136,7 +140,7 @@ fn nested_transactions() { let rows = transaction .query("SELECT id FROM foo ORDER BY id", &[]) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -160,7 +164,7 @@ fn nested_transactions() { let rows = client .query("SELECT id FROM foo ORDER BY id", &[]) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 3); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -187,7 +191,7 @@ fn copy_in() { let rows = client .query("SELECT id, name FROM foo ORDER BY id", &[]) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 2); @@ -246,7 +250,7 @@ fn portal() { let rows = transaction .query_portal(&portal, 2) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -255,7 +259,7 @@ fn portal() { let rows = transaction .query_portal(&portal, 2) .unwrap() - .to_vec() + .into_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 3); From 0f8943952ddb78910acbb15464d15310d9eb7308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=9D=9A=E5=9D=9A=E5=86=B0?= Date: Thu, 31 Jan 2019 12:30:45 +0800 Subject: [PATCH 123/819] fix missing '#[cfg(unix)]' --- tokio-postgres/src/config.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index eb4b97e2d..1ebe2b9af 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -236,6 +236,7 @@ impl Config { /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. 
+ #[cfg(unix)] pub fn host_path(&mut self, host: T) -> &mut Config where T: AsRef, From 289de79e00c9c6da52d3312cfebf1ae3e3c3d49b Mon Sep 17 00:00:00 2001 From: lygz5016 Date: Thu, 31 Jan 2019 13:19:39 +0800 Subject: [PATCH 124/819] add features option --- postgres/Cargo.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 2a203e3a6..87f5faed5 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -9,6 +9,13 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] +"with-bit-vec-0.5" = ["tokio-postgres/with-bit-vec-0.5"] +"with-chrono-0.4" = ["tokio-postgres/with-chrono-0.4"] +"with-eui48-0.4" = ["tokio-postgres/with-eui48-0.4"] +"with-geo-0.10" = ["tokio-postgres/with-geo-0.10"] +"with-serde_json-1" = ["tokio-postgres/with-serde_json-1"] +"with-uuid-0.7" = ["tokio-postgres/with-uuid-0.7"] + [dependencies] bytes = "0.4" fallible-iterator = "0.1" From 5c7ddc9c85f659bfc605c49d361a74e0de27fcee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=9D=9A=E5=9D=9A=E5=86=B0?= Date: Thu, 31 Jan 2019 14:48:04 +0800 Subject: [PATCH 125/819] Upgrade geo, rand and base64 --- postgres-protocol/Cargo.toml | 4 ++-- postgres/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 2c8157e59..2e03983b0 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/sfackler/rust-postgres-protocol" readme = "../README.md" [dependencies] -base64 = "0.9" +base64 = "0.10" byteorder = "1.0" bytes = "0.4" fallible-iterator = "0.1" @@ -17,6 +17,6 @@ generic-array = "0.12" hmac = "0.7" md5 = "0.5" memchr = "2.0" -rand = "0.5" +rand = "0.6" sha2 = "0.8" stringprep = "0.1" diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 87f5faed5..5ea814f4f 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -12,7 +12,7 @@ runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] "with-bit-vec-0.5" = ["tokio-postgres/with-bit-vec-0.5"] "with-chrono-0.4" = ["tokio-postgres/with-chrono-0.4"] "with-eui48-0.4" = ["tokio-postgres/with-eui48-0.4"] -"with-geo-0.10" = ["tokio-postgres/with-geo-0.10"] +"with-geo-0.11" = ["tokio-postgres/with-geo-0.11"] "with-serde_json-1" = ["tokio-postgres/with-serde_json-1"] "with-uuid-0.7" = ["tokio-postgres/with-uuid-0.7"] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index d47a105cc..5f216120d 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -18,7 +18,7 @@ features = [ "with-bit-vec-0.5", "with-chrono-0.4", "with-eui48-0.4", - "with-geo-0.10", + "with-geo-0.11", "with-serde_json-1", "with-uuid-0.7", ] @@ -33,7 +33,7 @@ runtime = ["tokio-tcp", "tokio-timer", "tokio-uds", "futures-cpupool", "lazy_sta "with-bit-vec-0.5" = ["bit-vec-05"] "with-chrono-0.4" = ["chrono-04"] "with-eui48-0.4" = ["eui48-04"] -"with-geo-0.10" = ["geo-010"] +"with-geo-0.11" = ["geo-011"] with-serde_json-1 = ["serde-1", "serde_json-1"] "with-uuid-0.7" = ["uuid-07"] @@ -58,7 +58,7 @@ tokio-timer = { version = "0.2", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } -geo-010 = { version = "0.10", package = "geo", optional = true } +geo-011 = { version = "0.11", package = "geo", optional = true } serde-1 = { 
version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } From 32e09dbb9152eb90bdd648ba8000c873634f4c33 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 31 Jan 2019 20:34:49 -0800 Subject: [PATCH 126/819] Change batch_execute into simple_query Closes #413 --- postgres/src/client.rs | 9 +- postgres/src/lib.rs | 2 + postgres/src/simple_query.rs | 42 ++++ postgres/src/test.rs | 45 ++-- postgres/src/transaction.rs | 32 +-- tokio-postgres-native-tls/src/test.rs | 2 +- tokio-postgres-openssl/src/test.rs | 2 +- tokio-postgres/src/error/mod.rs | 6 + tokio-postgres/src/impls.rs | 15 +- tokio-postgres/src/lib.rs | 31 ++- tokio-postgres/src/proto/client.rs | 2 +- tokio-postgres/src/proto/connect_once.rs | 37 ++-- tokio-postgres/src/proto/simple_query.rs | 70 ++++-- tokio-postgres/src/proto/transaction.rs | 6 +- tokio-postgres/src/proto/typeinfo.rs | 28 +-- .../src/proto/typeinfo_composite.rs | 4 +- tokio-postgres/src/proto/typeinfo_enum.rs | 2 +- tokio-postgres/src/row.rs | 112 ++++++++-- tokio-postgres/tests/test/main.rs | 199 +++++++++++------- tokio-postgres/tests/test/runtime.rs | 7 +- tokio-postgres/tests/test/types/mod.rs | 87 ++++---- 21 files changed, 497 insertions(+), 243 deletions(-) create mode 100644 postgres/src/simple_query.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 3b2d4f9c5..a1e4e8e93 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,3 +1,4 @@ +use fallible_iterator::FallibleIterator; use futures::{Async, Future, Poll, Stream}; use std::io::{self, Read}; use tokio_postgres::types::{ToSql, Type}; @@ -7,7 +8,7 @@ use tokio_postgres::{MakeTlsConnect, Socket, TlsConnect}; #[cfg(feature = "runtime")] use crate::Config; -use crate::{CopyOutReader, Query, Statement, ToStatement, Transaction}; +use crate::{CopyOutReader, Query, SimpleQuery, Statement, ToStatement, Transaction}; pub struct Client(tokio_postgres::Client); @@ -81,12 +82,12 @@ impl Client { CopyOutReader::new(stream) } - pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { - self.0.batch_execute(query).wait() + pub fn simple_query(&mut self, query: &str) -> Result, Error> { + Ok(SimpleQuery::new(self.0.simple_query(query))) } pub fn transaction(&mut self) -> Result, Error> { - self.batch_execute("BEGIN")?; + self.simple_query("BEGIN")?.count()?; Ok(Transaction::new(self)) } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index d3c68c89e..eefe5f938 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -10,6 +10,7 @@ mod copy_out_reader; mod portal; mod query; mod query_portal; +mod simple_query; mod statement; mod to_statement; mod transaction; @@ -25,6 +26,7 @@ pub use crate::copy_out_reader::*; pub use crate::portal::*; pub use crate::query::*; pub use crate::query_portal::*; +pub use crate::simple_query::*; pub use crate::statement::*; pub use crate::to_statement::*; pub use crate::transaction::*; diff --git a/postgres/src/simple_query.rs b/postgres/src/simple_query.rs new file mode 100644 index 000000000..b3a158928 --- /dev/null +++ b/postgres/src/simple_query.rs @@ -0,0 +1,42 @@ +use fallible_iterator::FallibleIterator; +use futures::stream::{self, Stream}; +use std::marker::PhantomData; +use tokio_postgres::impls; +use tokio_postgres::{Error, SimpleQueryMessage}; + +pub struct SimpleQuery<'a> { + it: stream::Wait, + _p: PhantomData<&'a mut ()>, +} + +// no-op impl to extend borrow until drop +impl<'a> 
Drop for SimpleQuery<'a> { + fn drop(&mut self) {} +} + +impl<'a> SimpleQuery<'a> { + pub(crate) fn new(stream: impls::SimpleQuery) -> SimpleQuery<'a> { + SimpleQuery { + it: stream.wait(), + _p: PhantomData, + } + } + + /// A convenience API which collects the resulting messages into a `Vec` and returns them. + pub fn into_vec(self) -> Result, Error> { + self.collect() + } +} + +impl<'a> FallibleIterator for SimpleQuery<'a> { + type Item = SimpleQueryMessage; + type Error = Error; + + fn next(&mut self) -> Result, Error> { + match self.it.next() { + Some(Ok(row)) => Ok(Some(row)), + Some(Err(e)) => Err(e), + None => Ok(None), + } + } +} diff --git a/postgres/src/test.rs b/postgres/src/test.rs index f9d7ec9b8..9fa6bda02 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,3 +1,4 @@ +use fallible_iterator::FallibleIterator; use std::io::Read; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -47,7 +48,9 @@ fn transaction_commit() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); client - .batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + .simple_query("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + .unwrap() + .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -72,7 +75,9 @@ fn transaction_rollback() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); client - .batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + .simple_query("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + .unwrap() + .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -96,7 +101,9 @@ fn transaction_drop() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); client - .batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + .simple_query("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") + .unwrap() + .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -120,7 +127,9 @@ fn nested_transactions() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); client - .batch_execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)") + .simple_query("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)") + .unwrap() + .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -177,7 +186,9 @@ fn copy_in() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); client - .batch_execute("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") + .simple_query("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") + .unwrap() + .count() .unwrap(); client @@ -206,13 +217,12 @@ fn copy_out() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); client - .batch_execute( - " - CREATE TEMPORARY TABLE foo (id INT, name TEXT); - - INSERT INTO foo (id, name) VALUES (1, 'steven'), (2, 'timothy'); - ", + .simple_query( + "CREATE TEMPORARY TABLE foo (id INT, name TEXT); + INSERT INTO foo (id, name) VALUES (1, 'steven'), (2, 'timothy');", ) + .unwrap() + .count() .unwrap(); let mut reader = client @@ -224,7 +234,7 @@ fn copy_out() { assert_eq!(s, "1\tsteven\n2\ttimothy\n"); - client.batch_execute("SELECT 1").unwrap(); + client.simple_query("SELECT 1").unwrap().count().unwrap(); } #[test] @@ -232,13 +242,12 @@ fn portal() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); client - .batch_execute( - " - 
CREATE TEMPORARY TABLE foo (id INT); - - INSERT INTO foo (id) VALUES (1), (2), (3); - ", + .simple_query( + "CREATE TEMPORARY TABLE foo (id INT); + INSERT INTO foo (id) VALUES (1), (2), (3);", ) + .unwrap() + .count() .unwrap(); let mut transaction = client.transaction().unwrap(); diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 4453f88c8..e76ac028c 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,9 +1,12 @@ +use fallible_iterator::FallibleIterator; use futures::Future; use std::io::Read; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::Error; -use crate::{Client, CopyOutReader, Portal, Query, QueryPortal, Statement, ToStatement}; +use crate::{ + Client, CopyOutReader, Portal, Query, QueryPortal, SimpleQuery, Statement, ToStatement, +}; pub struct Transaction<'a> { client: &'a mut Client, @@ -30,12 +33,14 @@ impl<'a> Transaction<'a> { pub fn commit(mut self) -> Result<(), Error> { self.done = true; - if self.depth == 0 { - self.client.batch_execute("COMMIT") + let it = if self.depth == 0 { + self.client.simple_query("COMMIT")? } else { self.client - .batch_execute(&format!("RELEASE sp{}", self.depth)) - } + .simple_query(&format!("RELEASE sp{}", self.depth))? + }; + it.count()?; + Ok(()) } pub fn rollback(mut self) -> Result<(), Error> { @@ -44,12 +49,14 @@ impl<'a> Transaction<'a> { } fn rollback_inner(&mut self) -> Result<(), Error> { - if self.depth == 0 { - self.client.batch_execute("ROLLBACK") + let it = if self.depth == 0 { + self.client.simple_query("ROLLBACK")? } else { self.client - .batch_execute(&format!("ROLLBACK TO sp{}", self.depth)) - } + .simple_query(&format!("ROLLBACK TO sp{}", self.depth))? + }; + it.count()?; + Ok(()) } pub fn prepare(&mut self, query: &str) -> Result { @@ -120,14 +127,15 @@ impl<'a> Transaction<'a> { self.client.copy_out(query, params) } - pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { - self.client.batch_execute(query) + pub fn simple_query(&mut self, query: &str) -> Result, Error> { + self.client.simple_query(query) } pub fn transaction(&mut self) -> Result, Error> { let depth = self.depth + 1; self.client - .batch_execute(&format!("SAVEPOINT sp{}", depth))?; + .simple_query(&format!("SAVEPOINT sp{}", depth))? 
+ .count()?; Ok(Transaction { client: self.client, depth, diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index 39938f3c8..2a756f58d 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -100,6 +100,6 @@ fn runtime() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let execute = client.batch_execute("SELECT 1"); + let execute = client.simple_query("SELECT 1").for_each(|_| Ok(())); runtime.block_on(execute).unwrap(); } diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index 2dc336c4e..2b5d84eaf 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -85,6 +85,6 @@ fn runtime() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let execute = client.batch_execute("SELECT 1"); + let execute = client.simple_query("SELECT 1").for_each(|_| Ok(())); runtime.block_on(execute).unwrap(); } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 144340da6..3e9545434 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -336,6 +336,7 @@ enum Kind { Tls, ToSql, FromSql, + Column, CopyInStream, Closed, Db, @@ -373,6 +374,7 @@ impl fmt::Display for Error { Kind::Tls => "error performing TLS handshake", Kind::ToSql => "error serializing a value", Kind::FromSql => "error deserializing a value", + Kind::Column => "invalid column", Kind::CopyInStream => "error from a copy_in stream", Kind::Closed => "connection closed", Kind::Db => "db error", @@ -451,6 +453,10 @@ impl Error { Error::new(Kind::FromSql, Some(e)) } + pub(crate) fn column() -> Error { + Error::new(Kind::Column, None) + } + pub(crate) fn copy_in_stream(e: E) -> Error where E: Into>, diff --git a/tokio-postgres/src/impls.rs b/tokio-postgres/src/impls.rs index 55ea60ca4..ae7d1d423 100644 --- a/tokio-postgres/src/impls.rs +++ b/tokio-postgres/src/impls.rs @@ -5,7 +5,7 @@ use std::error; use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto; -use crate::{Client, Connection, Error, Portal, Row, Statement, TlsConnect}; +use crate::{Client, Connection, Error, Portal, Row, SimpleQueryMessage, Statement, TlsConnect}; #[cfg(feature = "runtime")] use crate::{MakeTlsConnect, Socket}; @@ -187,3 +187,16 @@ impl Stream for CopyOut { self.0.poll() } } + +/// The stream returned by `Client::simple_query`. +#[must_use = "streams do nothing unless polled"] +pub struct SimpleQuery(pub(crate) proto::SimpleQueryStream); + +impl Stream for SimpleQuery { + type Item = SimpleQueryMessage; + type Error = Error; + + fn poll(&mut self) -> Poll, Error> { + self.0.poll() + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 095be98f5..94b4d1761 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -102,7 +102,7 @@ #![warn(rust_2018_idioms, clippy::all)] use bytes::IntoBuf; -use futures::{try_ready, Async, Future, Poll, Stream}; +use futures::{Future, Poll, Stream}; use std::error::Error as StdError; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -240,19 +240,21 @@ impl Client { impls::CopyOut(self.0.copy_out(&statement.0, params)) } - /// Executes a sequence of SQL statements. + /// Executes a sequence of SQL statements using the simple query protocol. /// /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that - /// point. 
This is intended for the execution of batches of non-dynamic statements, for example, the creation of - /// a schema for a fresh database. + /// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings, + /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a stream over the + /// rows, this method returns a stream over an enum which indicates either the completion of one of the commands, + /// or a row of data. This preserves the framing between the separate statements in the request. /// /// # Warning /// /// Prepared statements should be use for any query which contains user-specified data, as they provided the /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass /// them to this method! - pub fn batch_execute(&mut self, query: &str) -> BatchExecute { - BatchExecute(self.0.batch_execute(query)) + pub fn simple_query(&mut self, query: &str) -> impls::SimpleQuery { + impls::SimpleQuery(self.0.simple_query(query)) } /// A utility method to wrap a future in a database transaction. @@ -445,18 +447,11 @@ where } } -#[must_use = "futures do nothing unless polled"] -pub struct BatchExecute(proto::SimpleQueryStream); - -impl Future for BatchExecute { - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll<(), Error> { - while let Some(_) = try_ready!(self.0.poll()) {} - - Ok(Async::Ready(())) - } +pub enum SimpleQueryMessage { + Row(SimpleQueryRow), + CommandComplete(u64), + #[doc(hidden)] + __NonExhaustive, } /// An asynchronous notification. diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index a9e0d532e..f1e46309f 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -143,7 +143,7 @@ impl Client { .map_err(|_| Error::closed()) } - pub fn batch_execute(&self, query: &str) -> SimpleQueryStream { + pub fn simple_query(&self, query: &str) -> SimpleQueryStream { let pending = self.pending(|buf| { frontend::query(query, buf).map_err(Error::parse)?; Ok(()) diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index 197deea11..f80ead1b6 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -1,6 +1,5 @@ #![allow(clippy::large_enum_variant)] -use fallible_iterator::FallibleIterator; use futures::{try_ready, Async, Future, Poll, Stream}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; @@ -8,7 +7,7 @@ use std::io; use crate::proto::{ Client, ConnectRawFuture, ConnectSocketFuture, Connection, MaybeTlsStream, SimpleQueryStream, }; -use crate::{Config, Error, Socket, TargetSessionAttrs, TlsConnect}; +use crate::{Config, Error, SimpleQueryMessage, Socket, TargetSessionAttrs, TlsConnect}; #[derive(StateMachineFuture)] pub enum ConnectOnce @@ -75,7 +74,7 @@ where if let TargetSessionAttrs::ReadWrite = state.target_session_attrs { transition!(CheckingSessionAttrs { - stream: client.batch_execute("SHOW transaction_read_only"), + stream: client.simple_query("SHOW transaction_read_only"), client, connection, }) @@ -87,24 +86,26 @@ where fn poll_checking_session_attrs<'a>( state: &'a mut RentToOwn<'a, CheckingSessionAttrs>, ) -> Poll, Error> { - if let Async::Ready(()) = state.connection.poll()? { - return Err(Error::closed()); - } + loop { + if let Async::Ready(()) = state.connection.poll()? 
{ + return Err(Error::closed()); + } - match try_ready!(state.stream.poll()) { - Some(row) => { - let range = row.ranges().next().map_err(Error::parse)?.and_then(|r| r); - if range.map(|r| &row.buffer()[r]) == Some(b"on") { - Err(Error::connect(io::Error::new( - io::ErrorKind::PermissionDenied, - "database does not allow writes", - ))) - } else { - let state = state.take(); - transition!(Finished((state.client, state.connection))) + match try_ready!(state.stream.poll()) { + Some(SimpleQueryMessage::Row(row)) => { + if row.try_get(0)? == Some("on") { + return Err(Error::connect(io::Error::new( + io::ErrorKind::PermissionDenied, + "database does not allow writes", + ))); + } else { + let state = state.take(); + transition!(Finished((state.client, state.connection))) + } } + Some(_) => {} + None => return Err(Error::closed()), } - None => Err(Error::closed()), } } } diff --git a/tokio-postgres/src/proto/simple_query.rs b/tokio-postgres/src/proto/simple_query.rs index 9354f028f..69d467b2c 100644 --- a/tokio-postgres/src/proto/simple_query.rs +++ b/tokio-postgres/src/proto/simple_query.rs @@ -1,10 +1,12 @@ +use fallible_iterator::FallibleIterator; use futures::sync::mpsc; use futures::{Async, Poll, Stream}; -use postgres_protocol::message::backend::{DataRowBody, Message}; +use postgres_protocol::message::backend::Message; use std::mem; +use std::sync::Arc; use crate::proto::client::{Client, PendingRequest}; -use crate::Error; +use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; pub enum State { Start { @@ -12,6 +14,7 @@ pub enum State { request: PendingRequest, }, ReadResponse { + columns: Option>, receiver: mpsc::Receiver, }, Done, @@ -20,35 +23,76 @@ pub enum State { pub struct SimpleQueryStream(State); impl Stream for SimpleQueryStream { - type Item = DataRowBody; + type Item = SimpleQueryMessage; type Error = Error; - fn poll(&mut self) -> Poll, Error> { + fn poll(&mut self) -> Poll, Error> { loop { match mem::replace(&mut self.0, State::Done) { State::Start { client, request } => { let receiver = client.send(request)?; - self.0 = State::ReadResponse { receiver }; + self.0 = State::ReadResponse { + columns: None, + receiver, + }; } - State::ReadResponse { mut receiver } => { + State::ReadResponse { + columns, + mut receiver, + } => { let message = match receiver.poll() { Ok(Async::Ready(message)) => message, Ok(Async::NotReady) => { - self.0 = State::ReadResponse { receiver }; + self.0 = State::ReadResponse { columns, receiver }; return Ok(Async::NotReady); } Err(()) => unreachable!("mpsc receiver can't panic"), }; match message { - Some(Message::CommandComplete(_)) - | Some(Message::RowDescription(_)) - | Some(Message::EmptyQueryResponse) => { - self.0 = State::ReadResponse { receiver }; + Some(Message::CommandComplete(body)) => { + let rows = body + .tag() + .map_err(Error::parse)? + .rsplit(' ') + .next() + .unwrap() + .parse() + .unwrap_or(0); + self.0 = State::ReadResponse { + columns: None, + receiver, + }; + return Ok(Async::Ready(Some(SimpleQueryMessage::CommandComplete( + rows, + )))); + } + Some(Message::EmptyQueryResponse) => { + self.0 = State::ReadResponse { + columns: None, + receiver, + }; + return Ok(Async::Ready(Some(SimpleQueryMessage::CommandComplete(0)))); + } + Some(Message::RowDescription(body)) => { + let columns = body + .fields() + .map(|f| f.name().to_string()) + .collect::>() + .map_err(Error::parse)? 
+ .into(); + self.0 = State::ReadResponse { + columns: Some(columns), + receiver, + }; } Some(Message::DataRow(body)) => { - self.0 = State::ReadResponse { receiver }; - return Ok(Async::Ready(Some(body))); + let row = match &columns { + Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, + None => return Err(Error::unexpected_message()), + }; + self.0 = State::ReadResponse { columns, receiver }; + return Ok(Async::Ready(Some(SimpleQueryMessage::Row(row)))); } Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), Some(Message::ReadyForQuery(_)) => return Ok(Async::Ready(None)), diff --git a/tokio-postgres/src/proto/transaction.rs b/tokio-postgres/src/proto/transaction.rs index 722336415..fbd4e5c63 100644 --- a/tokio-postgres/src/proto/transaction.rs +++ b/tokio-postgres/src/proto/transaction.rs @@ -42,7 +42,7 @@ where ) -> Poll, E> { let state = state.take(); transition!(Beginning { - begin: state.client.batch_execute("BEGIN"), + begin: state.client.simple_query("BEGIN"), client: state.client, future: state.future, }) @@ -66,11 +66,11 @@ where match state.future.poll() { Ok(Async::NotReady) => Ok(Async::NotReady), Ok(Async::Ready(t)) => transition!(Finishing { - future: state.client.batch_execute("COMMIT"), + future: state.client.simple_query("COMMIT"), result: Ok(t), }), Err(e) => transition!(Finishing { - future: state.client.batch_execute("ROLLBACK"), + future: state.client.simple_query("ROLLBACK"), result: Err(e), }), } diff --git a/tokio-postgres/src/proto/typeinfo.rs b/tokio-postgres/src/proto/typeinfo.rs index 416f61d41..02e35eaeb 100644 --- a/tokio-postgres/src/proto/typeinfo.rs +++ b/tokio-postgres/src/proto/typeinfo.rs @@ -184,27 +184,13 @@ impl PollTypeinfo for Typeinfo { None => return Err(Error::unexpected_message()), }; - let name = row - .try_get::<_, String>(0)? - .ok_or_else(Error::unexpected_message)?; - let type_ = row - .try_get::<_, i8>(1)? - .ok_or_else(Error::unexpected_message)?; - let elem_oid = row - .try_get::<_, Oid>(2)? - .ok_or_else(Error::unexpected_message)?; - let rngsubtype = row - .try_get::<_, Option>(3)? - .ok_or_else(Error::unexpected_message)?; - let basetype = row - .try_get::<_, Oid>(4)? - .ok_or_else(Error::unexpected_message)?; - let schema = row - .try_get::<_, String>(5)? - .ok_or_else(Error::unexpected_message)?; - let relid = row - .try_get::<_, Oid>(6)? 
- .ok_or_else(Error::unexpected_message)?; + let name = row.try_get::<_, String>(0)?; + let type_ = row.try_get::<_, i8>(1)?; + let elem_oid = row.try_get::<_, Oid>(2)?; + let rngsubtype = row.try_get::<_, Option>(3)?; + let basetype = row.try_get::<_, Oid>(4)?; + let schema = row.try_get::<_, String>(5)?; + let relid = row.try_get::<_, Oid>(6)?; let kind = if type_ == b'e' as i8 { transition!(QueryingEnumVariants { diff --git a/tokio-postgres/src/proto/typeinfo_composite.rs b/tokio-postgres/src/proto/typeinfo_composite.rs index ca24f2e01..f424fabcb 100644 --- a/tokio-postgres/src/proto/typeinfo_composite.rs +++ b/tokio-postgres/src/proto/typeinfo_composite.rs @@ -96,8 +96,8 @@ impl PollTypeinfoComposite for TypeinfoComposite { let fields = rows .iter() .map(|row| { - let name = row.try_get(0)?.ok_or_else(Error::unexpected_message)?; - let oid = row.try_get(1)?.ok_or_else(Error::unexpected_message)?; + let name = row.try_get(0)?; + let oid = row.try_get(1)?; Ok((name, oid)) }) .collect::, Error>>()?; diff --git a/tokio-postgres/src/proto/typeinfo_enum.rs b/tokio-postgres/src/proto/typeinfo_enum.rs index 2cf3d7f88..d264d3ab7 100644 --- a/tokio-postgres/src/proto/typeinfo_enum.rs +++ b/tokio-postgres/src/proto/typeinfo_enum.rs @@ -124,7 +124,7 @@ impl PollTypeinfoEnum for TypeinfoEnum { let variants = rows .iter() - .map(|row| row.try_get(0)?.ok_or_else(Error::unexpected_message)) + .map(|row| row.try_get(0)) .collect::, _>>()?; transition!(Finished((variants, state.client))) diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index fb09f4916..dc287875f 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -3,15 +3,32 @@ use postgres_protocol::message::backend::DataRowBody; use std::fmt; use std::ops::Range; use std::str; +use std::sync::Arc; use crate::proto; -use crate::row::sealed::Sealed; +use crate::row::sealed::{AsName, Sealed}; use crate::stmt::Column; -use crate::types::{FromSql, WrongType}; +use crate::types::{FromSql, Type, WrongType}; use crate::Error; mod sealed { pub trait Sealed {} + + pub trait AsName { + fn as_name(&self) -> &str; + } +} + +impl AsName for Column { + fn as_name(&self) -> &str { + self.name() + } +} + +impl AsName for String { + fn as_name(&self) -> &str { + self + } } /// A trait implemented by types that can index into columns of a row. @@ -19,14 +36,19 @@ mod sealed { /// This cannot be implemented outside of this crate. pub trait RowIndex: Sealed { #[doc(hidden)] - fn __idx(&self, columns: &[Column]) -> Option; + fn __idx(&self, columns: &[T]) -> Option + where + T: AsName; } impl Sealed for usize {} impl RowIndex for usize { #[inline] - fn __idx(&self, columns: &[Column]) -> Option { + fn __idx(&self, columns: &[T]) -> Option + where + T: AsName, + { if *self >= columns.len() { None } else { @@ -39,8 +61,11 @@ impl Sealed for str {} impl RowIndex for str { #[inline] - fn __idx(&self, columns: &[Column]) -> Option { - if let Some(idx) = columns.iter().position(|d| d.name() == self) { + fn __idx(&self, columns: &[T]) -> Option + where + T: AsName, + { + if let Some(idx) = columns.iter().position(|d| d.as_name() == self) { return Some(idx); }; @@ -49,7 +74,7 @@ impl RowIndex for str { // uses the US locale. 
columns .iter() - .position(|d| d.name().eq_ignore_ascii_case(self)) + .position(|d| d.as_name().eq_ignore_ascii_case(self)) } } @@ -60,7 +85,10 @@ where T: ?Sized + RowIndex, { #[inline] - fn __idx(&self, columns: &[Column]) -> Option { + fn __idx(&self, columns: &[U]) -> Option + where + U: AsName, + { T::__idx(*self, columns) } } @@ -100,13 +128,12 @@ impl Row { T: FromSql<'a>, { match self.get_inner(&idx) { - Ok(Some(ok)) => ok, + Ok(ok) => ok, Err(err) => panic!("error retrieving column {}: {}", idx, err), - Ok(None) => panic!("no such column {}", idx), } } - pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result, Error> + pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result where I: RowIndex, T: FromSql<'a>, @@ -114,14 +141,14 @@ impl Row { self.get_inner(&idx) } - fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result, Error> + fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result where I: RowIndex, T: FromSql<'a>, { let idx = match idx.__idx(self.columns()) { Some(idx) => idx, - None => return Ok(None), + None => return Err(Error::column()), }; let ty = self.columns()[idx].type_(); @@ -130,7 +157,62 @@ impl Row { } let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); - let value = FromSql::from_sql_nullable(ty, buf); - value.map(Some).map_err(Error::from_sql) + FromSql::from_sql_nullable(ty, buf).map_err(Error::from_sql) + } +} + +pub struct SimpleQueryRow { + columns: Arc<[String]>, + body: DataRowBody, + ranges: Vec>>, +} + +impl SimpleQueryRow { + #[allow(clippy::new_ret_no_self)] + pub(crate) fn new(columns: Arc<[String]>, body: DataRowBody) -> Result { + let ranges = body.ranges().collect().map_err(Error::parse)?; + Ok(SimpleQueryRow { + columns, + body, + ranges, + }) + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn len(&self) -> usize { + self.columns.len() + } + + pub fn get(&self, idx: I) -> Option<&str> + where + I: RowIndex + fmt::Display, + { + match self.get_inner(&idx) { + Ok(ok) => ok, + Err(err) => panic!("error retrieving column {}: {}", idx, err), + } + } + + pub fn try_get(&self, idx: I) -> Result, Error> + where + I: RowIndex, + { + self.get_inner(&idx) + } + + fn get_inner(&self, idx: &I) -> Result, Error> + where + I: RowIndex, + { + let idx = match idx.__idx(&self.columns) { + Some(idx) => idx, + None => return Err(Error::column()), + }; + + let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); + FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(Error::from_sql) } } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index cf10269ab..b6829925d 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -161,7 +161,11 @@ fn insert_select() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT)")) + .block_on( + client + .simple_query("CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT)") + .for_each(|_| Ok(())), + ) .unwrap(); let insert = client.prepare("INSERT INTO foo (name) VALUES ($1), ($2)"); @@ -193,11 +197,15 @@ fn query_portal() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT); - INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('charlie'); - BEGIN;", - )) + .block_on( + client + .simple_query( + "CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT); + INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('charlie'); + BEGIN;", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let 
statement = runtime @@ -233,7 +241,8 @@ fn cancel_query_raw() { runtime.handle().spawn(connection).unwrap(); let sleep = client - .batch_execute("SELECT pg_sleep(100)") + .simple_query("SELECT pg_sleep(100)") + .for_each(|_| Ok(())) .then(|r| match r { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::QUERY_CANCELED) => Ok::<(), ()>(()), @@ -266,13 +275,17 @@ fn custom_enum() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TYPE pg_temp.mood AS ENUM ( - 'sad', - 'ok', - 'happy' - )", - )) + .block_on( + client + .simple_query( + "CREATE TYPE pg_temp.mood AS ENUM ( + 'sad', + 'ok', + 'happy' + )", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let select = client.prepare("SELECT $1::mood"); @@ -300,9 +313,13 @@ fn custom_domain() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)", - )) + .block_on( + client + .simple_query( + "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let select = client.prepare("SELECT $1::session_id"); @@ -346,13 +363,17 @@ fn custom_composite() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TYPE pg_temp.inventory_item AS ( - name TEXT, - supplier INTEGER, - price NUMERIC - )", - )) + .block_on( + client + .simple_query( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier INTEGER, + price NUMERIC + )", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let select = client.prepare("SELECT $1::inventory_item"); @@ -383,12 +404,16 @@ fn custom_range() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TYPE pg_temp.floatrange AS RANGE ( - subtype = float8, - subtype_diff = float8mi - )", - )) + .block_on( + client + .simple_query( + "CREATE TYPE pg_temp.floatrange AS RANGE ( + subtype = float8, + subtype_diff = float8mi + )", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let select = client.prepare("SELECT $1::floatrange"); @@ -438,15 +463,15 @@ fn notifications() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute("LISTEN test_notifications")) - .unwrap(); - - runtime - .block_on(client.batch_execute("NOTIFY test_notifications, 'hello'")) - .unwrap(); - - runtime - .block_on(client.batch_execute("NOTIFY test_notifications, 'world'")) + .block_on( + client + .simple_query( + "LISTEN test_notifications; + NOTIFY test_notifications, 'hello'; + NOTIFY test_notifications, 'world';", + ) + .for_each(|_| Ok(())), + ) .unwrap(); drop(client); @@ -470,15 +495,21 @@ fn transaction_commit() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL, - name TEXT - )", - )) + .block_on( + client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id SERIAL, + name TEXT + )", + ) + .for_each(|_| Ok(())), + ) .unwrap(); - let f = client.batch_execute("INSERT INTO foo (name) VALUES ('steven')"); + let f = client + .simple_query("INSERT INTO foo (name) VALUES ('steven')") + .for_each(|_| Ok(())); runtime .block_on(client.build_transaction().build(f)) .unwrap(); @@ -505,16 +536,21 @@ fn transaction_abort() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL, - name TEXT - )", - )) + .block_on( + client + .simple_query( + "CREATE 
TEMPORARY TABLE foo ( + id SERIAL, + name TEXT + )", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let f = client - .batch_execute("INSERT INTO foo (name) VALUES ('steven')") + .simple_query("INSERT INTO foo (name) VALUES ('steven')") + .for_each(|_| Ok(())) .map_err(|e| Box::new(e) as Box) .and_then(|_| Err::<(), _>(Box::::from(""))); runtime @@ -542,12 +578,16 @@ fn copy_in() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id INTEGER, - name TEXT - )", - )) + .block_on( + client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id INTEGER, + name TEXT + )", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let stream = stream::iter_ok::<_, String>(vec![b"1\tjim\n".to_vec(), b"2\tjoe\n".to_vec()]); @@ -585,12 +625,16 @@ fn copy_in_error() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id INTEGER, - name TEXT - )", - )) + .block_on( + client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id INTEGER, + name TEXT + )", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let stream = stream::iter_result(vec![Ok(b"1\tjim\n".to_vec()), Err("asdf")]); @@ -624,13 +668,17 @@ fn copy_out() { runtime.handle().spawn(connection).unwrap(); runtime - .block_on(client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL, - name TEXT - ); - INSERT INTO foo (name) VALUES ('jim'), ('joe');", - )) + .block_on( + client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id SERIAL, + name TEXT + ); + INSERT INTO foo (name) VALUES ('jim'), ('joe');", + ) + .for_each(|_| Ok(())), + ) .unwrap(); let data = runtime @@ -654,12 +702,13 @@ fn transaction_builder_around_moved_client() { let transaction_builder = client.build_transaction(); let work = client - .batch_execute( + .simple_query( "CREATE TEMPORARY TABLE transaction_foo ( id SERIAL, name TEXT )", ) + .for_each(|_| Ok(())) .and_then(move |_| { client .prepare("INSERT INTO transaction_foo (name) VALUES ($1), ($2)") @@ -725,7 +774,9 @@ fn poll_idle_running() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); - let execute = client.batch_execute("CREATE TEMPORARY TABLE foo (id INT)"); + let execute = client + .simple_query("CREATE TEMPORARY TABLE foo (id INT)") + .for_each(|_| Ok(())); runtime.block_on(execute).unwrap(); let prepare = client.prepare("COPY foo FROM STDIN"); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 691a51611..48aa88033 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,4 +1,4 @@ -use futures::Future; +use futures::{Future, Stream}; use std::time::{Duration, Instant}; use tokio::runtime::current_thread::Runtime; use tokio::timer::Delay; @@ -11,7 +11,7 @@ fn smoke_test(s: &str) { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let execute = client.batch_execute("SELECT 1"); + let execute = client.simple_query("SELECT 1").for_each(|_| Ok(())); runtime.block_on(execute).unwrap(); } @@ -80,7 +80,8 @@ fn cancel_query() { runtime.spawn(connection); let sleep = client - .batch_execute("SELECT pg_sleep(100)") + .simple_query("SELECT pg_sleep(100)") + .for_each(|_| Ok(())) .then(|r| match r { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::QUERY_CANCELED) => Ok::<(), ()>(()), diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 
ef9c7588e..5c9e81f4f 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -212,12 +212,14 @@ fn test_bpchar_params() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let batch = client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY, - b CHAR(5) - )", - ); + let batch = client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id SERIAL PRIMARY KEY, + b CHAR(5) + )", + ) + .for_each(|_| Ok(())); runtime.block_on(batch).unwrap(); let prepare = client.prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)"); @@ -245,12 +247,14 @@ fn test_citext_params() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let batch = client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY, - b CITEXT - )", - ); + let batch = client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id SERIAL PRIMARY KEY, + b CITEXT + )", + ) + .for_each(|_| Ok(())); runtime.block_on(batch).unwrap(); let prepare = client.prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)"); @@ -393,15 +397,16 @@ fn test_slice() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let batch = client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY, - f TEXT - ); + let batch = client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id SERIAL PRIMARY KEY, + f TEXT + ); - INSERT INTO foo(f) VALUES ('a'), ('b'), ('c'), ('d'); - ", - ); + INSERT INTO foo(f) VALUES ('a'), ('b'), ('c'), ('d');", + ) + .for_each(|_| Ok(())); runtime.block_on(batch).unwrap(); let prepare = client.prepare("SELECT f FROM foo WHERE id = ANY($1)"); @@ -424,11 +429,13 @@ fn test_slice_wrong_type() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let batch = client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY - )", - ); + let batch = client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id SERIAL PRIMARY KEY + )", + ) + .for_each(|_| Ok(())); runtime.block_on(batch).unwrap(); let prepare = client.prepare("SELECT * FROM foo WHERE id = ANY($1)"); @@ -507,10 +514,12 @@ fn domain() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let batch = client.batch_execute( - "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16); - CREATE TABLE pg_temp.foo (id pg_temp.session_id);", - ); + let batch = client + .simple_query( + "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16); + CREATE TABLE pg_temp.foo (id pg_temp.session_id);", + ) + .for_each(|_| Ok(())); runtime.block_on(batch).unwrap(); let id = SessionId(b"0123456789abcdef".to_vec()); @@ -536,13 +545,15 @@ fn composite() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let batch = client.batch_execute( - "CREATE TYPE pg_temp.inventory_item AS ( - name TEXT, - supplier INTEGER, - price NUMERIC - )", - ); + let batch = client + .simple_query( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier INTEGER, + price NUMERIC + )", + ) + .for_each(|_| Ok(())); runtime.block_on(batch).unwrap(); let prepare = client.prepare("SELECT $1::inventory_item"); @@ -571,7 +582,9 @@ fn enum_() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); - let batch = client.batch_execute("CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy');"); + let batch = client + .simple_query("CREATE TYPE 
pg_temp.mood AS ENUM ('sad', 'ok', 'happy');") + .for_each(|_| Ok(())); runtime.block_on(batch).unwrap(); let prepare = client.prepare("SELECT $1::mood"); From 48af7411923d0d76d17331030462e4f228649d67 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 31 Jan 2019 20:47:42 -0800 Subject: [PATCH 127/819] Add a simple_query test --- tokio-postgres/tests/test/main.rs | 52 ++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index b6829925d..881812690 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -13,7 +13,7 @@ use tokio::timer::Delay; use tokio_postgres::error::SqlState; use tokio_postgres::impls; use tokio_postgres::types::{Kind, Type}; -use tokio_postgres::{AsyncMessage, Client, Connection, NoTls, NoTlsStream}; +use tokio_postgres::{AsyncMessage, Client, Connection, NoTls, NoTlsStream, SimpleQueryMessage}; mod parse; #[cfg(feature = "runtime")] @@ -737,6 +737,56 @@ fn transaction_builder_around_moved_client() { runtime.run().unwrap(); } +#[test] +fn simple_query() { + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.handle().spawn(connection).unwrap(); + + let f = client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id SERIAL, + name TEXT + ); + INSERT INTO foo (name) VALUES ('steven'), ('joe'); + SELECT * FROM foo ORDER BY id;", + ) + .collect(); + let messages = runtime.block_on(f).unwrap(); + + match messages[0] { + SimpleQueryMessage::CommandComplete(0) => {} + _ => panic!("unexpected message"), + } + match messages[1] { + SimpleQueryMessage::CommandComplete(2) => {} + _ => panic!("unexpected message"), + } + match &messages[2] { + SimpleQueryMessage::Row(row) => { + assert_eq!(row.get(0), Some("1")); + assert_eq!(row.get(1), Some("steven")); + } + _ => panic!("unexpected message"), + } + match &messages[3] { + SimpleQueryMessage::Row(row) => { + assert_eq!(row.get(0), Some("2")); + assert_eq!(row.get(1), Some("joe")); + } + _ => panic!("unexpected message"), + } + match messages[4] { + SimpleQueryMessage::CommandComplete(2) => {} + _ => panic!("unexpected message"), + } + assert_eq!(messages.len(), 5); +} + #[test] fn poll_idle_running() { struct DelayStream(Delay); From 756121ed62f746c35d3828c2ba67251eeb979896 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 31 Jan 2019 20:59:33 -0800 Subject: [PATCH 128/819] cleanup --- tokio-postgres/src/impls.rs | 13 +++++++++++++ tokio-postgres/src/lib.rs | 21 +++++++-------------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/tokio-postgres/src/impls.rs b/tokio-postgres/src/impls.rs index ae7d1d423..2b4c6b9b3 100644 --- a/tokio-postgres/src/impls.rs +++ b/tokio-postgres/src/impls.rs @@ -121,6 +121,19 @@ impl Stream for Query { } } +/// The future returned by `Client::execute`. +#[must_use = "futures do nothing unless polled"] +pub struct Execute(pub(crate) proto::ExecuteFuture); + +impl Future for Execute { + type Item = u64; + type Error = Error; + + fn poll(&mut self) -> Poll { + self.0.poll() + } +} + /// The future returned by `Client::bind`. 
#[must_use = "futures do nothing unless polled"] pub struct Bind(pub(crate) proto::BindFuture); diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 94b4d1761..b1f360133 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -182,8 +182,8 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> Execute { - Execute(self.0.execute(&statement.0, params)) + pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Execute { + impls::Execute(self.0.execute(&statement.0, params)) } /// Executes a statement, returning a stream of the resulting rows. @@ -396,18 +396,6 @@ impl Statement { } } -#[must_use = "futures do nothing unless polled"] -pub struct Execute(proto::ExecuteFuture); - -impl Future for Execute { - type Item = u64; - type Error = Error; - - fn poll(&mut self) -> Poll { - self.0.poll() - } -} - /// A portal. /// /// Portals can only be used with the connection that created them, and only exist for the duration of the transaction @@ -447,8 +435,13 @@ where } } +/// Message returned by the `SimpleQuery` stream. pub enum SimpleQueryMessage { + /// A row of data. Row(SimpleQueryRow), + /// A statement in the query has completed. + /// + /// The number of rows modified or selected is returned. CommandComplete(u64), #[doc(hidden)] __NonExhaustive, From fa3241e6b5b812e6789d3bd253f04f73ff58d699 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 31 Jan 2019 21:28:04 -0800 Subject: [PATCH 129/819] Fix geo feature --- tokio-postgres/src/types/{geo_010.rs => geo_011.rs} | 2 +- tokio-postgres/src/types/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename tokio-postgres/src/types/{geo_010.rs => geo_011.rs} (97%) diff --git a/tokio-postgres/src/types/geo_010.rs b/tokio-postgres/src/types/geo_011.rs similarity index 97% rename from tokio-postgres/src/types/geo_010.rs rename to tokio-postgres/src/types/geo_011.rs index cb44d426c..bb40127ce 100644 --- a/tokio-postgres/src/types/geo_010.rs +++ b/tokio-postgres/src/types/geo_011.rs @@ -1,5 +1,5 @@ use fallible_iterator::FallibleIterator; -use geo_010::{Coordinate, LineString, Point, Rect}; +use geo_011::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index d9580c187..4b13150fb 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -78,8 +78,8 @@ mod bit_vec_05; mod chrono_04; #[cfg(feature = "with-eui48-0.4")] mod eui48_04; -#[cfg(feature = "with-geo-0.10")] -mod geo_010; +#[cfg(feature = "with-geo-0.11")] +mod geo_011; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-uuid-0.7")] From 7e0f10de0b05995c4ac449e1c05121b831400d2b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 1 Feb 2019 20:45:25 -0800 Subject: [PATCH 130/819] Cleanups --- tokio-postgres/src/types/mod.rs | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 4b13150fb..9329e1192 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -106,9 +106,7 @@ impl fmt::Display for Type { } impl Type { - // WARNING: this is not considered public API - #[doc(hidden)] - pub fn _new(name: String, oid: Oid, kind: Kind, schema: String) -> Type { + pub(crate) 
fn _new(name: String, oid: Oid, kind: Kind, schema: String) -> Type { Type(Inner::Other(Arc::new(Other { name, oid, @@ -165,7 +163,7 @@ pub enum Kind { /// A composite type along with information about its fields. Composite(Vec), #[doc(hidden)] - __PseudoPrivateForExtensibility, + __ForExtensibility, } /// Information about a field of a composite type. @@ -188,8 +186,7 @@ impl Field { } impl Field { - #[doc(hidden)] - pub fn new(name: String, type_: Type) -> Field { + pub(crate) fn new(name: String, type_: Type) -> Field { Field { name, type_ } } } @@ -201,15 +198,11 @@ pub struct WasNull; impl fmt::Display for WasNull { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.write_str(self.description()) + fmt.write_str("a Postgres value was `NULL`") } } -impl Error for WasNull { - fn description(&self) -> &str { - "a Postgres value was `NULL`" - } -} +impl Error for WasNull {} /// An error indicating that a conversion was attempted between incompatible /// Rust and Postgres types. @@ -226,15 +219,10 @@ impl fmt::Display for WrongType { } } -impl Error for WrongType { - fn description(&self) -> &str { - "cannot convert to or from a Postgres value" - } -} +impl Error for WrongType {} impl WrongType { - #[doc(hidden)] - pub fn new(ty: Type) -> WrongType { + pub(crate) fn new(ty: Type) -> WrongType { WrongType(ty) } } From fb6e19ae6093958ca5e338a7bc9c7f0e841bc664 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 17 Feb 2019 12:33:52 -0800 Subject: [PATCH 131/819] Use geo-types rather than geo Closes #418 --- postgres/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 4 ++-- tokio-postgres/src/types/{geo_011.rs => geo_types_04.rs} | 2 +- tokio-postgres/src/types/mod.rs | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) rename tokio-postgres/src/types/{geo_011.rs => geo_types_04.rs} (97%) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 5ea814f4f..f4ff6ebd0 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -12,7 +12,7 @@ runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] "with-bit-vec-0.5" = ["tokio-postgres/with-bit-vec-0.5"] "with-chrono-0.4" = ["tokio-postgres/with-chrono-0.4"] "with-eui48-0.4" = ["tokio-postgres/with-eui48-0.4"] -"with-geo-0.11" = ["tokio-postgres/with-geo-0.11"] +"with-geo-types-0.4" = ["tokio-postgres/with-geo-types-0.4"] "with-serde_json-1" = ["tokio-postgres/with-serde_json-1"] "with-uuid-0.7" = ["tokio-postgres/with-uuid-0.7"] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 5f216120d..ae6ec3c6d 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -33,7 +33,7 @@ runtime = ["tokio-tcp", "tokio-timer", "tokio-uds", "futures-cpupool", "lazy_sta "with-bit-vec-0.5" = ["bit-vec-05"] "with-chrono-0.4" = ["chrono-04"] "with-eui48-0.4" = ["eui48-04"] -"with-geo-0.11" = ["geo-011"] +"with-geo-types-0.4" = ["geo-types-04"] with-serde_json-1 = ["serde-1", "serde_json-1"] "with-uuid-0.7" = ["uuid-07"] @@ -58,7 +58,7 @@ tokio-timer = { version = "0.2", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } -geo-011 = { version = "0.11", package = "geo", optional = true } +geo-types-04 = { version = "0.4", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = 
"0.7", package = "uuid", optional = true } diff --git a/tokio-postgres/src/types/geo_011.rs b/tokio-postgres/src/types/geo_types_04.rs similarity index 97% rename from tokio-postgres/src/types/geo_011.rs rename to tokio-postgres/src/types/geo_types_04.rs index bb40127ce..9a46cc2ac 100644 --- a/tokio-postgres/src/types/geo_011.rs +++ b/tokio-postgres/src/types/geo_types_04.rs @@ -1,5 +1,5 @@ use fallible_iterator::FallibleIterator; -use geo_011::{Coordinate, LineString, Point, Rect}; +use geo_types_04::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 9329e1192..400ca533d 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -78,8 +78,8 @@ mod bit_vec_05; mod chrono_04; #[cfg(feature = "with-eui48-0.4")] mod eui48_04; -#[cfg(feature = "with-geo-0.11")] -mod geo_011; +#[cfg(feature = "with-geo-types-0.4")] +mod geo_types_04; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-uuid-0.7")] From 4bde7d2e3ceb7ab40ac137f864ffc8561943cb21 Mon Sep 17 00:00:00 2001 From: TheSandwichMakr Date: Tue, 19 Feb 2019 23:23:42 +0000 Subject: [PATCH 132/819] Omit passwords from debug output --- tokio-postgres/src/config.rs | 41 +++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 1ebe2b9af..b60166b37 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -53,7 +53,7 @@ pub(crate) enum Host { Unix(PathBuf), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Clone, PartialEq)] pub(crate) struct Inner { pub(crate) user: Option, pub(crate) password: Option>, @@ -69,6 +69,45 @@ pub(crate) struct Inner { pub(crate) target_session_attrs: TargetSessionAttrs, } +// Omit password from debug output +impl fmt::Debug for Inner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Inner {{ \ + user: {:?}, \ + password: {}, \ + dbname: {:?}, \ + options: {:?}, \ + application_name: {:?}, \ + ssl_mode: {:?}, \ + host: {:?}, \ + port: {:?}, \ + connect_timeout: {:?}, \ + keepalives: {:?}, \ + keepalives_idle: {:?}, \ + target_session_attrs: {:?} \ + }}", + self.user, + if self.password.is_some() { + "Some(_)" + } else { + "None" + }, + self.dbname, + self.options, + self.application_name, + self.ssl_mode, + self.host, + self.port, + self.connect_timeout, + self.keepalives, + self.keepalives_idle, + self.target_session_attrs + ) + } +} + /// Connection configuration. /// /// Configuration can be parsed from libpq-style connection strings. 
These strings come in two formats: From 5dd9bb5eab6b5a981afb33cebc21787b5e953473 Mon Sep 17 00:00:00 2001 From: TheSandwichMakr Date: Wed, 20 Feb 2019 03:03:25 +0000 Subject: [PATCH 133/819] move Debug from Inner to Config, use Formatter::debug_struct --- tokio-postgres/src/config.rs | 68 +++++++++++++++--------------------- 1 file changed, 28 insertions(+), 40 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index b60166b37..93eaf9440 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -69,45 +69,6 @@ pub(crate) struct Inner { pub(crate) target_session_attrs: TargetSessionAttrs, } -// Omit password from debug output -impl fmt::Debug for Inner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Inner {{ \ - user: {:?}, \ - password: {}, \ - dbname: {:?}, \ - options: {:?}, \ - application_name: {:?}, \ - ssl_mode: {:?}, \ - host: {:?}, \ - port: {:?}, \ - connect_timeout: {:?}, \ - keepalives: {:?}, \ - keepalives_idle: {:?}, \ - target_session_attrs: {:?} \ - }}", - self.user, - if self.password.is_some() { - "Some(_)" - } else { - "None" - }, - self.dbname, - self.options, - self.application_name, - self.ssl_mode, - self.host, - self.port, - self.connect_timeout, - self.keepalives, - self.keepalives_idle, - self.target_session_attrs - ) - } -} - /// Connection configuration. /// /// Configuration can be parsed from libpq-style connection strings. These strings come in two formats: @@ -181,7 +142,7 @@ impl fmt::Debug for Inner { /// ```not_rust /// postgresql:///mydb?user=user&host=/var/lib/postgresql /// ``` -#[derive(Debug, Clone, PartialEq)] +#[derive(Clone, PartialEq)] pub struct Config(pub(crate) Arc); impl Default for Config { @@ -453,6 +414,33 @@ impl FromStr for Config { } } +// Omit password from debug output +impl fmt::Debug for Config { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + struct Redaction {} + impl fmt::Debug for Redaction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "_") + } + } + + f.debug_struct("Config") + .field("user", &self.0.user) + .field("password", &self.0.password.as_ref().map(|_| Redaction {})) + .field("dbname", &self.0.dbname) + .field("options", &self.0.options) + .field("application_name", &self.0.application_name) + .field("ssl_mode", &self.0.ssl_mode) + .field("host", &self.0.host) + .field("port", &self.0.port) + .field("connect_timeout", &self.0.connect_timeout) + .field("keepalives", &self.0.keepalives) + .field("keepalives_idle", &self.0.keepalives_idle) + .field("target_session_attrs", &self.0.target_session_attrs) + .finish() + } +} + #[derive(Debug)] struct UnknownOption(String); From d35139dac908a2f5320fad8a092444e7705d5632 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 4 Mar 2019 20:30:57 -0800 Subject: [PATCH 134/819] Fix ToSql/FromSql docs --- tokio-postgres/src/types/mod.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 400ca533d..216183bf5 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -252,14 +252,11 @@ impl WrongType { /// In addition, some implementations are provided for types in third party /// crates. These are disabled by default; to opt into one of these /// implementations, activate the Cargo feature corresponding to the crate's -/// name prefixed by `with-`. 
For example, the `with-serde_json` feature enables +/// name prefixed by `with-`. For example, the `with-serde_json-1` feature enables /// the implementation for the `serde_json::Value` type. /// /// | Rust type | Postgres type(s) | /// |---------------------------------|-------------------------------------| -/// | `serialize::json::Json` | JSON, JSONB | -/// | `serde_json::Value` | JSON, JSONB | -/// | `time::Timespec` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDateTime` | TIMESTAMP | /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | @@ -267,6 +264,10 @@ impl WrongType { /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | /// | `eui48::MacAddress` | MACADDR | +/// | `geo_types::Point` | POINT | +/// | `geo_types::Rect` | BOX | +/// | `geo_types::LineString` | PATH | +/// | `serde_json::Value` | JSON, JSONB | /// | `uuid::Uuid` | UUID | /// | `bit_vec::BitVec` | BIT, VARBIT | /// | `eui48::MacAddress` | MACADDR | @@ -496,25 +497,27 @@ pub enum IsNull { /// | `&str`/`String` | VARCHAR, CHAR(n), TEXT, CITEXT, NAME | /// | `&[u8]`/Vec` | BYTEA | /// | `HashMap>` | HSTORE | -/// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | +/// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | /// /// In addition, some implementations are provided for types in third party /// crates. These are disabled by default; to opt into one of these /// implementations, activate the Cargo feature corresponding to the crate's -/// name prefixed by `with-`. For example, the `with-serde_json` feature enables +/// name prefixed by `with-`. For example, the `with-serde_json-1` feature enables /// the implementation for the `serde_json::Value` type. /// /// | Rust type | Postgres type(s) | /// |---------------------------------|-------------------------------------| -/// | `serialize::json::Json` | JSON, JSONB | -/// | `serde_json::Value` | JSON, JSONB | -/// | `time::Timespec` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDateTime` | TIMESTAMP | /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | +/// | `eui48::MacAddress` | MACADDR | +/// | `geo_types::Point` | POINT | +/// | `geo_types::Rect` | BOX | +/// | `geo_types::LineString` | PATH | +/// | `serde_json::Value` | JSON, JSONB | /// | `uuid::Uuid` | UUID | /// | `bit_vec::BitVec` | BIT, VARBIT | /// | `eui48::MacAddress` | MACADDR | From 07c7ffdac67298795a2ee4bb32935610fcc3864a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 4 Mar 2019 21:22:50 -0800 Subject: [PATCH 135/819] Finish docs for tokio-postgres --- tokio-postgres/src/config.rs | 2 ++ tokio-postgres/src/impls.rs | 20 ++++++++++++++++++++ tokio-postgres/src/lib.rs | 30 +++++++----------------------- tokio-postgres/src/proto/tls.rs | 1 + tokio-postgres/src/row.rs | 23 +++++++++++++++++++++++ tokio-postgres/src/socket.rs | 3 +++ tokio-postgres/src/tls.rs | 24 ++++++++++++++++++++++++ 7 files changed, 80 insertions(+), 23 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 93eaf9440..e35040650 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -44,6 +44,8 @@ pub enum SslMode { Prefer, /// Require the use of TLS. 
Require, + #[doc(hidden)] + __NonExhaustive, } #[derive(Debug, Clone, PartialEq)] diff --git a/tokio-postgres/src/impls.rs b/tokio-postgres/src/impls.rs index 2b4c6b9b3..546c3d7da 100644 --- a/tokio-postgres/src/impls.rs +++ b/tokio-postgres/src/impls.rs @@ -213,3 +213,23 @@ impl Stream for SimpleQuery { self.0.poll() } } + +/// The future returned by `TransactionBuilder::build`. +#[must_use = "futures do nothing unless polled"] +pub struct Transaction(pub(crate) proto::TransactionFuture) +where + T: Future, + T::Error: From; + +impl Future for Transaction +where + T: Future, + T::Error: From, +{ + type Item = T::Item; + type Error = T::Error; + + fn poll(&mut self) -> Poll { + self.0.poll() + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index b1f360133..f45a3a4b8 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -87,7 +87,9 @@ //! use futures::Future; //! use tokio_postgres::{Client, Error, Statement}; //! -//! fn pipelined_prepare(client: &mut Client) -> impl Future +//! fn pipelined_prepare( +//! client: &mut Client, +//! ) -> impl Future //! { //! client.prepare("SELECT * FROM foo") //! .join(client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)")) @@ -99,7 +101,7 @@ //! The client works with arbitrary `AsyncRead + AsyncWrite` streams. Convenience APIs are provided to handle the //! connection process, but these are gated by the `runtime` Cargo feature, which is enabled by default. If disabled, //! all dependence on the tokio runtime is removed. -#![warn(rust_2018_idioms, clippy::all)] +#![warn(rust_2018_idioms, clippy::all, missing_docs)] use bytes::IntoBuf; use futures::{Future, Poll, Stream}; @@ -406,32 +408,14 @@ pub struct Portal(proto::Portal); pub struct TransactionBuilder(proto::Client); impl TransactionBuilder { - pub fn build(self, future: T) -> Transaction + /// Returns a future which wraps another in a database transaction. + pub fn build(self, future: T) -> impls::Transaction where T: Future, // FIXME error type? T::Error: From, { - Transaction(proto::TransactionFuture::new(self.0, future)) - } -} - -#[must_use = "futures do nothing unless polled"] -pub struct Transaction(proto::TransactionFuture) -where - T: Future, - T::Error: From; - -impl Future for Transaction -where - T: Future, - T::Error: From, -{ - type Item = T::Item; - type Error = T::Error; - - fn poll(&mut self) -> Poll { - self.0.poll() + impls::Transaction(proto::TransactionFuture::new(self.0, future)) } } diff --git a/tokio-postgres/src/proto/tls.rs b/tokio-postgres/src/proto/tls.rs index e4274a6e9..6f909d54c 100644 --- a/tokio-postgres/src/proto/tls.rs +++ b/tokio-postgres/src/proto/tls.rs @@ -63,6 +63,7 @@ where tls: state.tls, }) } + SslMode::__NonExhaustive => unreachable!(), } } diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index dc287875f..43b872d57 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -93,6 +93,7 @@ where } } +/// A row of data returned from the database by a query. pub struct Row { statement: proto::Statement, body: DataRowBody, @@ -110,18 +111,28 @@ impl Row { }) } + /// Returns information about the columns of data in the row. pub fn columns(&self) -> &[Column] { self.statement.columns() } + /// Determines if the row contains no values. pub fn is_empty(&self) -> bool { self.len() == 0 } + /// Returns the number of values in the row. pub fn len(&self) -> usize { self.columns().len() } + /// Deserializes a value from the row. 
+ /// + /// The value can be specified either by its numeric index in the row, or by its column name. + /// + /// # Panics + /// + /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. pub fn get<'a, I, T>(&'a self, idx: I) -> T where I: RowIndex + fmt::Display, @@ -133,6 +144,7 @@ impl Row { } } + /// Like `Row::get`, but returns a `Result` rather than panicking. pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result where I: RowIndex, @@ -161,6 +173,7 @@ impl Row { } } +/// A row of data returned from the database by a simple query. pub struct SimpleQueryRow { columns: Arc<[String]>, body: DataRowBody, @@ -178,14 +191,23 @@ impl SimpleQueryRow { }) } + /// Determines if the row contains no values. pub fn is_empty(&self) -> bool { self.len() == 0 } + /// Returns the number of values in the row. pub fn len(&self) -> usize { self.columns.len() } + /// Returns a value from the row. + /// + /// The value can be specified either by its numeric index in the row, or by its column name. + /// + /// # Panics + /// + /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. pub fn get(&self, idx: I) -> Option<&str> where I: RowIndex + fmt::Display, @@ -196,6 +218,7 @@ impl SimpleQueryRow { } } + /// Like `SimpleQueryRow::get`, but returns a `Result` rather than panicking. pub fn try_get(&self, idx: I) -> Result, Error> where I: RowIndex, diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs index 641562df5..3931fa5e1 100644 --- a/tokio-postgres/src/socket.rs +++ b/tokio-postgres/src/socket.rs @@ -13,6 +13,9 @@ enum Inner { Unix(UnixStream), } +/// The standard stream type used by the crate. +/// +/// Requires the `runtime` Cargo feature (enabled by default). #[derive(Debug)] pub struct Socket(Inner); diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 7eabfab34..7060821e2 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -9,17 +9,20 @@ pub(crate) mod private { pub struct ForcePrivateApi; } +/// Channel binding information returned from a TLS handshake. pub struct ChannelBinding { pub(crate) tls_server_end_point: Option>, } impl ChannelBinding { + /// Creates a `ChannelBinding` containing no information. pub fn none() -> ChannelBinding { ChannelBinding { tls_server_end_point: None, } } + /// Creates a `ChannelBinding` containing `tls-server-end-point` channel binding information. pub fn tls_server_end_point(tls_server_end_point: Vec) -> ChannelBinding { ChannelBinding { tls_server_end_point: Some(tls_server_end_point), @@ -27,20 +30,34 @@ impl ChannelBinding { } } +/// A constructor of `TlsConnect`ors. +/// +/// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub trait MakeTlsConnect { + /// The stream type created by the `TlsConnect` implementation. type Stream: AsyncRead + AsyncWrite; + /// The `TlsConnect` implementation created by this type. type TlsConnect: TlsConnect; + /// The error type retured by the `TlsConnect` implementation. type Error: Into>; + /// Creates a new `TlsConnect`or. + /// + /// The domain name is provided for certificate verification and SNI. fn make_tls_connect(&mut self, domain: &str) -> Result; } +/// An asynchronous function wrapping a stream in a TLS session. pub trait TlsConnect { + /// The stream returned by the future. type Stream: AsyncRead + AsyncWrite; + /// The error type returned by the future. type Error: Into>; + /// The future returned by the connector. 
type Future: Future; + /// Returns a future performing a TLS handshake over the stream. fn connect(self, stream: S) -> Self::Future; #[doc(hidden)] @@ -49,6 +66,9 @@ pub trait TlsConnect { } } +/// A `MakeTlsConnect` and `TlsConnect` implementation which simply returns an error. +/// +/// This can be used when `sslmode` is `none` or `prefer`. #[derive(Debug, Copy, Clone)] pub struct NoTls; @@ -77,6 +97,9 @@ impl TlsConnect for NoTls { } } +/// The TLS "stream" type produced by the `NoTls` connector. +/// +/// Since `NoTls` doesn't support TLS, this type is uninhabited. pub enum NoTlsStream {} impl Read for NoTlsStream { @@ -103,6 +126,7 @@ impl AsyncWrite for NoTlsStream { } } +/// The error returned by `NoTls`. #[derive(Debug)] pub struct NoTlsError(()); From 1d3c540dd9a893f71d69bfb926bbf0af4c8366db Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 4 Mar 2019 21:26:10 -0800 Subject: [PATCH 136/819] Fix serde docs --- tokio-postgres/src/types/serde_json_1.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/types/serde_json_1.rs b/tokio-postgres/src/types/serde_json_1.rs index 58860f45d..b3b2e3c90 100644 --- a/tokio-postgres/src/types/serde_json_1.rs +++ b/tokio-postgres/src/types/serde_json_1.rs @@ -6,6 +6,7 @@ use std::io::Read; use crate::types::{FromSql, IsNull, ToSql, Type}; +/// A wrapper type to allow arbitrary `Serialize`/`Deserialize` types to convert to Postgres JSON values. #[derive(Debug)] pub struct Json(pub T); From 31534b57345c399e5ca9346147c89872d5f7df57 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 4 Mar 2019 21:26:10 -0800 Subject: [PATCH 137/819] Fix serde docs --- postgres/src/client.rs | 3 ++- postgres/src/config.rs | 3 ++- tokio-postgres-native-tls/src/lib.rs | 4 ++-- tokio-postgres-native-tls/src/test.rs | 2 +- tokio-postgres-openssl/src/lib.rs | 4 ++-- tokio-postgres-openssl/src/test.rs | 2 +- tokio-postgres/src/lib.rs | 10 +++++++--- tokio-postgres/src/proto/connect_raw.rs | 3 ++- tokio-postgres/src/proto/tls.rs | 3 ++- tokio-postgres/src/tls.rs | 2 ++ tokio-postgres/tests/test/main.rs | 3 ++- tokio-postgres/tests/test/runtime.rs | 3 ++- 12 files changed, 27 insertions(+), 15 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index a1e4e8e93..2e181cc97 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -4,7 +4,8 @@ use std::io::{self, Read}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::Error; #[cfg(feature = "runtime")] -use tokio_postgres::{MakeTlsConnect, Socket, TlsConnect}; +use tokio_postgres::{Socket}; +use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; #[cfg(feature = "runtime")] use crate::Config; diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 49536cb0a..de14a2d31 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -4,7 +4,8 @@ use log::error; use std::path::Path; use std::str::FromStr; use std::time::Duration; -use tokio_postgres::{Error, MakeTlsConnect, Socket, SslMode, TargetSessionAttrs, TlsConnect}; +use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; +use tokio_postgres::{Error, Socket, SslMode, TargetSessionAttrs}; use crate::{Client, RUNTIME}; diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index 6d31c4a7c..8ff31081f 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -3,8 +3,8 @@ use futures::{try_ready, Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "runtime")] -use tokio_postgres::MakeTlsConnect; 
-use tokio_postgres::{ChannelBinding, TlsConnect}; +use tokio_postgres::tls::MakeTlsConnect; +use tokio_postgres::tls::{ChannelBinding, TlsConnect}; use tokio_tls::{Connect, TlsStream}; #[cfg(test)] diff --git a/tokio-postgres-native-tls/src/test.rs b/tokio-postgres-native-tls/src/test.rs index 2a756f58d..43aab31f7 100644 --- a/tokio-postgres-native-tls/src/test.rs +++ b/tokio-postgres-native-tls/src/test.rs @@ -2,7 +2,7 @@ use futures::{Future, Stream}; use native_tls::{self, Certificate}; use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; -use tokio_postgres::TlsConnect; +use tokio_postgres::tls::TlsConnect; #[cfg(feature = "runtime")] use crate::MakeTlsConnector; diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index f635e1b1b..14da740cb 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -14,8 +14,8 @@ use std::sync::Arc; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_openssl::{ConnectAsync, ConnectConfigurationExt, SslStream}; #[cfg(feature = "runtime")] -use tokio_postgres::MakeTlsConnect; -use tokio_postgres::{ChannelBinding, TlsConnect}; +use tokio_postgres::tls::MakeTlsConnect; +use tokio_postgres::tls::{ChannelBinding, TlsConnect}; #[cfg(test)] mod test; diff --git a/tokio-postgres-openssl/src/test.rs b/tokio-postgres-openssl/src/test.rs index 2b5d84eaf..c3923a33b 100644 --- a/tokio-postgres-openssl/src/test.rs +++ b/tokio-postgres-openssl/src/test.rs @@ -2,7 +2,7 @@ use futures::{Future, Stream}; use openssl::ssl::{SslConnector, SslMethod}; use tokio::net::TcpStream; use tokio::runtime::current_thread::Runtime; -use tokio_postgres::TlsConnect; +use tokio_postgres::tls::TlsConnect; use super::*; diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index f45a3a4b8..3c655cb04 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -110,12 +110,16 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use tokio_io::{AsyncRead, AsyncWrite}; pub use crate::config::*; -pub use crate::error::*; +use crate::error::DbError; +pub use crate::error::Error; pub use crate::row::*; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::stmt::Column; -pub use crate::tls::*; +#[cfg(feature = "runtime")] +use crate::tls::MakeTlsConnect; +pub use crate::tls::NoTls; +use crate::tls::TlsConnect; use crate::types::{ToSql, Type}; mod config; @@ -126,7 +130,7 @@ mod row; #[cfg(feature = "runtime")] mod socket; mod stmt; -mod tls; +pub mod tls; pub mod types; fn next_statement() -> String { diff --git a/tokio-postgres/src/proto/connect_raw.rs b/tokio-postgres/src/proto/connect_raw.rs index 12efae333..45b2c3e58 100644 --- a/tokio-postgres/src/proto/connect_raw.rs +++ b/tokio-postgres/src/proto/connect_raw.rs @@ -12,7 +12,8 @@ use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::{Client, Connection, MaybeTlsStream, PostgresCodec, TlsFuture}; -use crate::{ChannelBinding, Config, Error, TlsConnect}; +use crate::tls::ChannelBinding; +use crate::{Config, Error, TlsConnect}; #[derive(StateMachineFuture)] pub enum ConnectRaw diff --git a/tokio-postgres/src/proto/tls.rs b/tokio-postgres/src/proto/tls.rs index 6f909d54c..0f9d5376b 100644 --- a/tokio-postgres/src/proto/tls.rs +++ b/tokio-postgres/src/proto/tls.rs @@ -6,7 +6,8 @@ use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::MaybeTlsStream; use crate::tls::private::ForcePrivateApi; -use crate::{ChannelBinding, Error, SslMode, TlsConnect}; +use crate::tls::ChannelBinding; +use 
crate::{Error, SslMode, TlsConnect}; #[derive(StateMachineFuture)] pub enum Tls diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 7060821e2..811be0a22 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -1,3 +1,5 @@ +//! TLS support. + use futures::future::{self, FutureResult}; use futures::{Future, Poll}; use std::error::Error; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 881812690..7930b1b95 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -12,8 +12,9 @@ use tokio::runtime::current_thread::Runtime; use tokio::timer::Delay; use tokio_postgres::error::SqlState; use tokio_postgres::impls; +use tokio_postgres::tls::NoTlsStream; use tokio_postgres::types::{Kind, Type}; -use tokio_postgres::{AsyncMessage, Client, Connection, NoTls, NoTlsStream, SimpleQueryMessage}; +use tokio_postgres::{AsyncMessage, Client, Connection, NoTls, SimpleQueryMessage}; mod parse; #[cfg(feature = "runtime")] diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 48aa88033..2af9a18d7 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -2,7 +2,8 @@ use futures::{Future, Stream}; use std::time::{Duration, Instant}; use tokio::runtime::current_thread::Runtime; use tokio::timer::Delay; -use tokio_postgres::{NoTls, SqlState}; +use tokio_postgres::error::SqlState; +use tokio_postgres::NoTls; fn smoke_test(s: &str) { let mut runtime = Runtime::new().unwrap(); From 1f80b78f07fcff1f3dd4f644c3567145e06fb7a2 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 4 Mar 2019 21:51:19 -0800 Subject: [PATCH 138/819] Adjust API layout --- postgres/src/config.rs | 3 ++- tokio-postgres/src/config.rs | 2 ++ tokio-postgres/src/lib.rs | 8 ++++---- tokio-postgres/src/proto/cancel_query.rs | 3 ++- tokio-postgres/src/proto/cancel_query_raw.rs | 3 ++- tokio-postgres/src/proto/connect.rs | 3 ++- tokio-postgres/src/proto/connect_once.rs | 3 ++- tokio-postgres/src/proto/connect_socket.rs | 3 ++- tokio-postgres/src/proto/tls.rs | 3 ++- tokio-postgres/src/row.rs | 2 ++ 10 files changed, 22 insertions(+), 11 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index de14a2d31..7979f0d13 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -4,8 +4,9 @@ use log::error; use std::path::Path; use std::str::FromStr; use std::time::Duration; +use tokio_postgres::config::{SslMode, TargetSessionAttrs}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; -use tokio_postgres::{Error, Socket, SslMode, TargetSessionAttrs}; +use tokio_postgres::{Error, Socket}; use crate::{Client, RUNTIME}; diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index e35040650..76899725f 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1,3 +1,5 @@ +//! Connection configuration. 
+ use std::borrow::Cow; use std::error; #[cfg(unix)] diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 3c655cb04..b4dd6244c 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -109,10 +109,10 @@ use std::error::Error as StdError; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio_io::{AsyncRead, AsyncWrite}; -pub use crate::config::*; +pub use crate::config::Config; use crate::error::DbError; pub use crate::error::Error; -pub use crate::row::*; +pub use crate::row::{Row, SimpleQueryRow}; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::stmt::Column; @@ -122,11 +122,11 @@ pub use crate::tls::NoTls; use crate::tls::TlsConnect; use crate::types::{ToSql, Type}; -mod config; +pub mod config; pub mod error; pub mod impls; mod proto; -mod row; +pub mod row; #[cfg(feature = "runtime")] mod socket; mod stmt; diff --git a/tokio-postgres/src/proto/cancel_query.rs b/tokio-postgres/src/proto/cancel_query.rs index 1a7377c27..909fe6548 100644 --- a/tokio-postgres/src/proto/cancel_query.rs +++ b/tokio-postgres/src/proto/cancel_query.rs @@ -2,8 +2,9 @@ use futures::{try_ready, Future, Poll}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; +use crate::config::{Host, SslMode}; use crate::proto::{CancelQueryRawFuture, ConnectSocketFuture}; -use crate::{Config, Error, Host, MakeTlsConnect, Socket, SslMode}; +use crate::{Config, Error, MakeTlsConnect, Socket}; #[derive(StateMachineFuture)] pub enum CancelQuery diff --git a/tokio-postgres/src/proto/cancel_query_raw.rs b/tokio-postgres/src/proto/cancel_query_raw.rs index 522fe318b..3580b3580 100644 --- a/tokio-postgres/src/proto/cancel_query_raw.rs +++ b/tokio-postgres/src/proto/cancel_query_raw.rs @@ -4,9 +4,10 @@ use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use tokio_io::io::{self, Flush, WriteAll}; use tokio_io::{AsyncRead, AsyncWrite}; +use crate::config::SslMode; use crate::error::Error; use crate::proto::{MaybeTlsStream, TlsFuture}; -use crate::{SslMode, TlsConnect}; +use crate::TlsConnect; #[derive(StateMachineFuture)] pub enum CancelQueryRaw diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs index 1bc3f481a..510c96be6 100644 --- a/tokio-postgres/src/proto/connect.rs +++ b/tokio-postgres/src/proto/connect.rs @@ -1,8 +1,9 @@ use futures::{Async, Future, Poll}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; +use crate::config::Host; use crate::proto::{Client, ConnectOnceFuture, Connection, MaybeTlsStream}; -use crate::{Config, Error, Host, MakeTlsConnect, Socket}; +use crate::{Config, Error, MakeTlsConnect, Socket}; #[derive(StateMachineFuture)] pub enum Connect diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs index f80ead1b6..c42ebb1d1 100644 --- a/tokio-postgres/src/proto/connect_once.rs +++ b/tokio-postgres/src/proto/connect_once.rs @@ -4,10 +4,11 @@ use futures::{try_ready, Async, Future, Poll, Stream}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::io; +use crate::config::TargetSessionAttrs; use crate::proto::{ Client, ConnectRawFuture, ConnectSocketFuture, Connection, MaybeTlsStream, SimpleQueryStream, }; -use crate::{Config, Error, SimpleQueryMessage, Socket, TargetSessionAttrs, TlsConnect}; +use crate::{Config, Error, SimpleQueryMessage, Socket, TlsConnect}; #[derive(StateMachineFuture)] pub enum ConnectOnce diff --git a/tokio-postgres/src/proto/connect_socket.rs 
b/tokio-postgres/src/proto/connect_socket.rs index 7b7e5cb88..7b1f92056 100644 --- a/tokio-postgres/src/proto/connect_socket.rs +++ b/tokio-postgres/src/proto/connect_socket.rs @@ -11,7 +11,8 @@ use tokio_timer::Delay; #[cfg(unix)] use tokio_uds::UnixStream; -use crate::{Config, Error, Host, Socket}; +use crate::config::Host; +use crate::{Config, Error, Socket}; lazy_static! { static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() diff --git a/tokio-postgres/src/proto/tls.rs b/tokio-postgres/src/proto/tls.rs index 0f9d5376b..00df2ed19 100644 --- a/tokio-postgres/src/proto/tls.rs +++ b/tokio-postgres/src/proto/tls.rs @@ -4,10 +4,11 @@ use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use tokio_io::io::{self, ReadExact, WriteAll}; use tokio_io::{AsyncRead, AsyncWrite}; +use crate::config::SslMode; use crate::proto::MaybeTlsStream; use crate::tls::private::ForcePrivateApi; use crate::tls::ChannelBinding; -use crate::{Error, SslMode, TlsConnect}; +use crate::{Error, TlsConnect}; #[derive(StateMachineFuture)] pub enum Tls diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 43b872d57..0dd4c0520 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -1,3 +1,5 @@ +//! Rows. + use fallible_iterator::FallibleIterator; use postgres_protocol::message::backend::DataRowBody; use std::fmt; From 0de50abc29e790b0499b7ea2a1981dae77aeb76d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 4 Mar 2019 21:55:37 -0800 Subject: [PATCH 139/819] Rustfmt --- postgres-protocol/src/authentication/sasl.rs | 2 +- postgres/src/client.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index 4e34a8c87..bcd2c4b61 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -257,7 +257,7 @@ impl ScramSha256 { return Err(io::Error::new( io::ErrorKind::Other, format!("SCRAM error: {}", e), - )) + )); } ServerFinalMessage::Verifier(verifier) => verifier, }; diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 2e181cc97..26a0b788b 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,11 +1,11 @@ use fallible_iterator::FallibleIterator; use futures::{Async, Future, Poll, Stream}; use std::io::{self, Read}; +use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::Error; #[cfg(feature = "runtime")] -use tokio_postgres::{Socket}; -use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; +use tokio_postgres::Socket; #[cfg(feature = "runtime")] use crate::Config; From 269197f7434c09fa923e3f96723ad53416be6704 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 4 Mar 2019 22:22:51 -0800 Subject: [PATCH 140/819] Fix build --- tokio-postgres/tests/test/parse.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index ed3b59824..a7a9625b2 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,5 +1,5 @@ use std::time::Duration; -use tokio_postgres::{Config, TargetSessionAttrs}; +use tokio_postgres::config::{Config, TargetSessionAttrs}; fn check(s: &str, config: &Config) { assert_eq!(s.parse::().expect(s), *config, "`{}`", s); From 23b83e51530a2c1884d57b929b121b2df972a499 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 4 Mar 2019 22:24:51 -0800 Subject: [PATCH 141/819] Upgrade 
state-machine-future --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index ae6ec3c6d..0b862c216 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -46,7 +46,7 @@ log = "0.4" percent-encoding = "1.0" phf = "0.7.23" postgres-protocol = { version = "0.3.0", path = "../postgres-protocol" } -state_machine_future = "0.1.7" +state_machine_future = "0.2" tokio-codec = "0.1" tokio-io = "0.1" From 9e4f4d34967c4f04608f5f7270388c957df81061 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Mar 2019 18:26:55 -0800 Subject: [PATCH 142/819] More clippy --- .circleci/config.yml | 2 +- codegen/src/sqlstate.rs | 2 +- tokio-postgres/src/error/sqlstate.rs | 2 +- tokio-postgres/src/types/chrono_04.rs | 4 ++-- tokio-postgres/tests/test/types/chrono_04.rs | 14 +++++++------- tokio-postgres/tests/test/types/mod.rs | 15 ++++++++------- 6 files changed, 20 insertions(+), 19 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 68edc5c18..b2d7fb732 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,7 +35,7 @@ jobs: - run: rustc --version > ~/rust-version - *RESTORE_DEPS - run: cargo fmt --all -- --check - - run: cargo clippy --all + - run: cargo clippy --all --all-targets --all-features - run: cargo test --all - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index c940f0167..2da7e8dce 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -86,7 +86,7 @@ fn make_map(codes: &LinkedHashMap>, file: &mut BufWriter = " ) .unwrap(); diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 27ee63530..60c18e42e 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -775,7 +775,7 @@ impl SqlState { /// XX002 pub const INDEX_CORRUPTED: SqlState = SqlState(Cow::Borrowed("XX002")); } -#[cfg_attr(rustfmt, rustfmt_skip)] +#[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { key: 3213172566270843353, disps: ::phf::Slice::Static(&[ diff --git a/tokio-postgres/src/types/chrono_04.rs b/tokio-postgres/src/types/chrono_04.rs index 91ec6a801..e414a93fe 100644 --- a/tokio-postgres/src/types/chrono_04.rs +++ b/tokio-postgres/src/types/chrono_04.rs @@ -103,7 +103,7 @@ impl ToSql for DateTime { impl<'a> FromSql<'a> for NaiveDate { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let jd = types::date_from_sql(raw)?; - Ok(base().date() + Duration::days(jd as i64)) + Ok(base().date() + Duration::days(i64::from(jd))) } accepts!(DATE); @@ -112,7 +112,7 @@ impl<'a> FromSql<'a> for NaiveDate { impl ToSql for NaiveDate { fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { let jd = self.signed_duration_since(base().date()).num_days(); - if jd > i32::max_value() as i64 || jd < i32::min_value() as i64 { + if jd > i64::from(i32::max_value()) || jd < i64::from(i32::min_value()) { return Err("value too large to transmit".into()); } diff --git a/tokio-postgres/tests/test/types/chrono_04.rs b/tokio-postgres/tests/test/types/chrono_04.rs index 224a5487b..eb0018065 100644 --- a/tokio-postgres/tests/test/types/chrono_04.rs +++ b/tokio-postgres/tests/test/types/chrono_04.rs @@ -5,7 +5,7 @@ use crate::types::test_type; #[test] fn test_naive_date_time_params() { - fn make_check<'a>(time: &'a str) -> 
(Option, &'a str) { + fn make_check(time: &str) -> (Option, &str) { ( Some(NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), time, @@ -24,7 +24,7 @@ fn test_naive_date_time_params() { #[test] fn test_with_special_naive_date_time_params() { - fn make_check<'a>(time: &'a str) -> (Timestamp, &'a str) { + fn make_check(time: &str) -> (Timestamp, &str) { ( Timestamp::Value( NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap(), @@ -46,7 +46,7 @@ fn test_with_special_naive_date_time_params() { #[test] fn test_date_time_params() { - fn make_check<'a>(time: &'a str) -> (Option>, &'a str) { + fn make_check(time: &str) -> (Option>, &str) { ( Some( Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") @@ -68,7 +68,7 @@ fn test_date_time_params() { #[test] fn test_with_special_date_time_params() { - fn make_check<'a>(time: &'a str) -> (Timestamp>, &'a str) { + fn make_check(time: &str) -> (Timestamp>, &str) { ( Timestamp::Value( Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") @@ -91,7 +91,7 @@ fn test_with_special_date_time_params() { #[test] fn test_date_params() { - fn make_check<'a>(time: &'a str) -> (Option, &'a str) { + fn make_check(time: &str) -> (Option, &str) { ( Some(NaiveDate::parse_from_str(time, "'%Y-%m-%d'").unwrap()), time, @@ -110,7 +110,7 @@ fn test_date_params() { #[test] fn test_with_special_date_params() { - fn make_check<'a>(date: &'a str) -> (Date, &'a str) { + fn make_check(date: &str) -> (Date, &str) { ( Date::Value(NaiveDate::parse_from_str(date, "'%Y-%m-%d'").unwrap()), date, @@ -130,7 +130,7 @@ fn test_with_special_date_params() { #[test] fn test_time_params() { - fn make_check<'a>(time: &'a str) -> (Option, &'a str) { + fn make_check(time: &str) -> (Option, &str) { ( Some(NaiveTime::parse_from_str(time, "'%H:%M:%S.%f'").unwrap()), time, diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 5c9e81f4f..9fd42ca4d 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -99,8 +99,8 @@ fn test_i32_params() { test_type( "INT", &[ - (Some(2147483548i32), "2147483548"), - (Some(-2147483548i32), "-2147483548"), + (Some(2_147_483_548i32), "2147483548"), + (Some(-2_147_483_548i32), "-2147483548"), (None, "NULL"), ], ); @@ -111,8 +111,8 @@ fn test_oid_params() { test_type( "OID", &[ - (Some(2147483548u32), "2147483548"), - (Some(4000000000), "4000000000"), + (Some(2_147_483_548u32), "2147483548"), + (Some(4_000_000_000), "4000000000"), (None, "NULL"), ], ); @@ -123,8 +123,8 @@ fn test_i64_params() { test_type( "BIGINT", &[ - (Some(9223372036854775708i64), "9223372036854775708"), - (Some(-9223372036854775708i64), "-9223372036854775708"), + (Some(9_223_372_036_854_775_708i64), "9223372036854775708"), + (Some(-9_223_372_036_854_775_708i64), "-9223372036854775708"), (None, "NULL"), ], ); @@ -343,6 +343,7 @@ fn test_array_params() { ); } +#[allow(clippy::eq_op)] fn test_nan_param(sql_type: &str) where T: PartialEq + ToSql + FromSqlOwned, @@ -616,7 +617,7 @@ fn system_time() { "'1969-12-31 23:59:58.99'", ), ( - Some(UNIX_EPOCH + Duration::from_millis(946684800 * 1000 + 1_010)), + Some(UNIX_EPOCH + Duration::from_millis(946_684_800 * 1000 + 1_010)), "'2000-01-01 00:00:01.01'", ), (None, "NULL"), From c619c741bea4884c654bd68c5e91f76db7dadf4f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Mar 2019 18:59:43 -0800 Subject: [PATCH 143/819] Upgrade md5 --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 2e03983b0..4c6581022 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -15,7 +15,7 @@ bytes = "0.4" fallible-iterator = "0.1" generic-array = "0.12" hmac = "0.7" -md5 = "0.5" +md5 = "0.6" memchr = "2.0" rand = "0.6" sha2 = "0.8" From 9385bebefccda77d426a68d13ddb71421a7ebb68 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Mar 2019 19:03:36 -0800 Subject: [PATCH 144/819] Release postgres-protocol v0.4.0 --- postgres-protocol/CHANGELOG.md | 22 ++++++++++++++++++++++ postgres-protocol/Cargo.toml | 2 +- postgres-protocol/src/lib.rs | 2 +- tokio-postgres/Cargo.toml | 2 +- 4 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 postgres-protocol/CHANGELOG.md diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md new file mode 100644 index 000000000..71ca501c8 --- /dev/null +++ b/postgres-protocol/CHANGELOG.md @@ -0,0 +1,22 @@ +# Change Log + +## [Unreleased] + +## [v0.4.0] - 2019-05-03 + +### Added + +* Added channel binding support to SCRAM authentication API. + +### Changed + +* Passwords are no longer required to be UTF8 strings. +* `types::array_to_sql` now automatically computes the required flags and no longer takes a has_nulls parameter. + +## Older + +Look at the [release tags] for information about older releases. + +[Unreleased]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.4.0...master +[v0.4.0]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.3.2...postgres-protocol-v0.4.0 +[release tags]: https://github.com/sfackler/rust-postgres/releases diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 4c6581022..9f274c025 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.3.2" +version = "0.4.0" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index 4cdefcd20..30f35fa2b 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -9,7 +9,7 @@ //! //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. 
-#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.3")] +#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.4")] #![warn(missing_docs, rust_2018_idioms, clippy::all)] use byteorder::{BigEndian, ByteOrder}; diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 0b862c216..b9318a1c7 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -45,7 +45,7 @@ futures = "0.1.7" log = "0.4" percent-encoding = "1.0" phf = "0.7.23" -postgres-protocol = { version = "0.3.0", path = "../postgres-protocol" } +postgres-protocol = { version = "0.4.0", path = "../postgres-protocol" } state_machine_future = "0.2" tokio-codec = "0.1" tokio-io = "0.1" From 4174eeeecb0d71fa304ca83b1ce26432fd9035e8 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Mar 2019 19:03:36 -0800 Subject: [PATCH 145/819] Release tokio-postgres v0.4.0-rc.1 --- postgres/Cargo.toml | 12 +- tokio-postgres-native-tls/Cargo.toml | 2 +- tokio-postgres-openssl/Cargo.toml | 2 +- tokio-postgres/CHANGELOG.md | 42 ++++++ tokio-postgres/Cargo.toml | 25 ++-- tokio-postgres/LICENSE-APACHE | 201 +++++++++++++++++++++++++++ tokio-postgres/LICENSE-MIT | 22 +++ tokio-postgres/src/lib.rs | 1 + 8 files changed, 283 insertions(+), 24 deletions(-) create mode 100644 tokio-postgres/CHANGELOG.md create mode 100644 tokio-postgres/LICENSE-APACHE create mode 100644 tokio-postgres/LICENSE-MIT diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index f4ff6ebd0..0057a7bcc 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -9,18 +9,18 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] -"with-bit-vec-0.5" = ["tokio-postgres/with-bit-vec-0.5"] -"with-chrono-0.4" = ["tokio-postgres/with-chrono-0.4"] -"with-eui48-0.4" = ["tokio-postgres/with-eui48-0.4"] -"with-geo-types-0.4" = ["tokio-postgres/with-geo-types-0.4"] +"with-bit-vec-0_5" = ["tokio-postgres/with-bit-vec-0_5"] +"with-chrono-0_4" = ["tokio-postgres/with-chrono-0_4"] +"with-eui48-0_4" = ["tokio-postgres/with-eui48-0_4"] +"with-geo-types-0_4" = ["tokio-postgres/with-geo-types-0_4"] "with-serde_json-1" = ["tokio-postgres/with-serde_json-1"] -"with-uuid-0.7" = ["tokio-postgres/with-uuid-0.7"] +"with-uuid-0_7" = ["tokio-postgres/with-uuid-0_7"] [dependencies] bytes = "0.4" fallible-iterator = "0.1" futures = "0.1" -tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } tokio = { version = "0.1", optional = true } lazy_static = { version = "1.0", optional = true } diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml index 9c9383483..0c7e77b25 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/tokio-postgres-native-tls/Cargo.toml @@ -13,7 +13,7 @@ futures = "0.1" native-tls = "0.2" tokio-io = "0.1" tokio-tls = "0.2.1" -tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = "0.1.7" diff --git a/tokio-postgres-openssl/Cargo.toml b/tokio-postgres-openssl/Cargo.toml index 2ce03eafb..afba5cdc9 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/tokio-postgres-openssl/Cargo.toml @@ -13,7 +13,7 @@ futures = "0.1" openssl = "0.10" tokio-io = "0.1" tokio-openssl = "0.3" -tokio-postgres = { version = "0.3", path = "../tokio-postgres", default-features = false } +tokio-postgres = { 
version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = "0.1.7" diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md new file mode 100644 index 000000000..af5d42ef4 --- /dev/null +++ b/tokio-postgres/CHANGELOG.md @@ -0,0 +1,42 @@ +# Change Log + +## [Unreleased] + +## [v0.4.0-rc.1] - 2019-05-03 + +### Changed + +* The client API has been significantly overhauled. It now resembles `hyper`'s, with separate `Connection` and `Client` + objects. See the crate-level documentation for more details. +* Connection string configuration now fully mirrors libpq's syntax, and supports both URL-style and key-value style + strings. +* The TLS connection mode (e.g. `prefer`) is now part of the connection configuration rather than being passed in + separately. +* The Cargo features enabling `ToSql` and `FromSql` implementations for external crates are now versioned. For example, + `with-uuid` is now `with-uuid-0_7`. This enables us to add support for new major versions of the crates in parallel + without breaking backwards compatibility. +* Upgraded from `tokio-core` to `tokio`. + +### Added + +* `FromSql` implementations can now borrow from the data buffer. In particular, this means that you can deserialize + values as `&str`. The `FromSqlOwned` trait can be used as a bound to restrict code to deserializing owned values. +* Added support for channel binding with SCRAM authentication. +* Added multi-host support in connection configuration. +* The client now supports query pipelining, which can be used as a latency hiding measure. +* While the crate uses `tokio` by default, the base API can be used with any asynchronous stream type on any reactor. +* Added support for simple query requests returning row data. + +### Removed + +* The `with-openssl` feature has been removed. Use the `tokio-postgres-openssl` crate instead. +* The `with-rustc_serialize` and `with-time` features have been removed. Use `serde` and `SystemTime` or `chrono` + instead. + +## Older + +Look at the [release tags] for information about older releases. 
+ +[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.1...master +[v0.4.0-rc.1]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.3.0...tokio-postgres-v0.4.0-rc.1 +[release tags]: https://github.com/sfackler/rust-postgres/releases diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index b9318a1c7..1a6d5ef12 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "tokio-postgres" -version = "0.3.0" +version = "0.4.0-rc.1" authors = ["Steven Fackler "] edition = "2018" -license = "MIT" -description = "A native PostgreSQL driver using Tokio" +license = "MIT/Apache-2.0" +description = "A native, asynchronous PostgreSQL client" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" keywords = ["database", "postgres", "postgresql", "sql", "async"] @@ -14,14 +14,7 @@ categories = ["database"] test = false [package.metadata.docs.rs] -features = [ - "with-bit-vec-0.5", - "with-chrono-0.4", - "with-eui48-0.4", - "with-geo-0.11", - "with-serde_json-1", - "with-uuid-0.7", -] +all-features = true [badges] circle-ci = { repository = "sfackler/rust-postgres" } @@ -30,12 +23,12 @@ circle-ci = { repository = "sfackler/rust-postgres" } default = ["runtime"] runtime = ["tokio-tcp", "tokio-timer", "tokio-uds", "futures-cpupool", "lazy_static"] -"with-bit-vec-0.5" = ["bit-vec-05"] -"with-chrono-0.4" = ["chrono-04"] -"with-eui48-0.4" = ["eui48-04"] -"with-geo-types-0.4" = ["geo-types-04"] +"with-bit-vec-0_5" = ["bit-vec-05"] +"with-chrono-0_4" = ["chrono-04"] +"with-eui48-0_4" = ["eui48-04"] +"with-geo-types-0_4" = ["geo-types-04"] with-serde_json-1 = ["serde-1", "serde_json-1"] -"with-uuid-0.7" = ["uuid-07"] +"with-uuid-0_7" = ["uuid-07"] [dependencies] antidote = "1.0" diff --git a/tokio-postgres/LICENSE-APACHE b/tokio-postgres/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/tokio-postgres/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/tokio-postgres/LICENSE-MIT b/tokio-postgres/LICENSE-MIT new file mode 100644 index 000000000..71803aea1 --- /dev/null +++ b/tokio-postgres/LICENSE-MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Steven Fackler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index b4dd6244c..bb1304c53 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -101,6 +101,7 @@ //! The client works with arbitrary `AsyncRead + AsyncWrite` streams. Convenience APIs are provided to handle the //! connection process, but these are gated by the `runtime` Cargo feature, which is enabled by default. If disabled, //! all dependence on the tokio runtime is removed. +#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.4.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] use bytes::IntoBuf; From e549f8d635c65eb14604f84d75c6eb647389185d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Mar 2019 21:47:34 -0800 Subject: [PATCH 146/819] Fix features --- tokio-postgres/src/types/mod.rs | 10 +++++----- tokio-postgres/tests/test/types/mod.rs | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 216183bf5..05cbab513 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -72,17 +72,17 @@ where v.to_sql(ty, out) } -#[cfg(feature = "with-bit-vec-0.5")] +#[cfg(feature = "with-bit-vec-0_5")] mod bit_vec_05; -#[cfg(feature = "with-chrono-0.4")] +#[cfg(feature = "with-chrono-0_4")] mod chrono_04; -#[cfg(feature = "with-eui48-0.4")] +#[cfg(feature = "with-eui48-0_4")] mod eui48_04; -#[cfg(feature = "with-geo-types-0.4")] +#[cfg(feature = "with-geo-types-0_4")] mod geo_types_04; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; -#[cfg(feature = "with-uuid-0.7")] +#[cfg(feature = "with-uuid-0_7")] mod uuid_07; mod special; diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 9fd42ca4d..bf9870043 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -12,17 +12,17 @@ use tokio_postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, ToSql, Type, Wr use crate::connect; -#[cfg(feature = "with-bit-vec-0.7")] +#[cfg(feature = "with-bit-vec-0_7")] mod bit_vec_07; -#[cfg(feature = "with-chrono-0.4")] +#[cfg(feature = "with-chrono-0_4")] mod chrono_04; -#[cfg(feature = "with-eui48-0.4")] +#[cfg(feature = "with-eui48-0_4")] mod eui48_04; -#[cfg(feature = "with-geo-0.10")] +#[cfg(feature = "with-geo-0_10")] mod geo_010; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; -#[cfg(feature = "with-uuid-0.7")] +#[cfg(feature = "with-uuid-0_7")] mod uuid_07; fn test_type(sql_type: &str, checks: &[(T, S)]) From 4f084e7b647a26dcaa8da5301be93304cffe092f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Mar 2019 21:49:41 -0800 Subject: [PATCH 147/819] Release tokio-postgres v0.4.0-rc.2 --- postgres/Cargo.toml | 2 +- tokio-postgres/CHANGELOG.md | 9 ++++++++- tokio-postgres/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 0057a7bcc..bff0f6fd7 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -20,7 +20,7 @@ runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] bytes = "0.4" fallible-iterator = "0.1" futures = "0.1" -tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false 
} +tokio-postgres = { version = "0.4.0-rc.2", path = "../tokio-postgres", default-features = false } tokio = { version = "0.1", optional = true } lazy_static = { version = "1.0", optional = true } diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index af5d42ef4..375e1b27b 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -2,6 +2,12 @@ ## [Unreleased] +## [v0.4.0-rc.2] - 2019-05-03 + +### Fixed + +* Fixed Cargo features to actually enable the functionality they claim to. + ## [v0.4.0-rc.1] - 2019-05-03 ### Changed @@ -37,6 +43,7 @@ Look at the [release tags] for information about older releases. -[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.1...master +[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.2...master +[v0.4.0-rc.2]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.1...tokio-postgres-v0.4.0-rc.2 [v0.4.0-rc.1]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.3.0...tokio-postgres-v0.4.0-rc.1 [release tags]: https://github.com/sfackler/rust-postgres/releases diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 1a6d5ef12..2914f8d75 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From db6dde8c7d36cd6522090707192cc78bc5035241 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Mar 2019 22:01:18 -0800 Subject: [PATCH 148/819] Document tokio-postgres-openssl --- tokio-postgres-openssl/src/lib.rs | 40 +++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index 14da740cb..12b913301 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -1,4 +1,24 @@ -#![warn(rust_2018_idioms, clippy::all)] +//! TLS support for `tokio-postgres` via `openssl`. +//! +//! # Example +//! +//! ```no_run +//! use openssl::ssl::{SslConnector, SslMethod}; +//! use tokio_postgres_openssl::MakeTlsConnector; +//! +//! let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); +//! builder.set_ca_file("database_cert.pem").unwrap(); +//! let connector = MakeTlsConnector::new(builder.build()); +//! +//! let connect_future = tokio_postgres::connect( +//! "host=localhost user=postgres sslmode=require", +//! connector, +//! ); +//! +//! // ... +//! ``` + +#![warn(rust_2018_idioms, clippy::all, missing_docs)] use futures::{try_ready, Async, Future, Poll}; #[cfg(feature = "runtime")] @@ -20,25 +40,32 @@ use tokio_postgres::tls::{ChannelBinding, TlsConnect}; #[cfg(test)] mod test; +/// A `MakeTlsConnect` implementation using the `openssl` crate. +/// +/// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] #[derive(Clone)] pub struct MakeTlsConnector { connector: SslConnector, - config: Arc Result<(), ErrorStack> + Sync + Send>, + config: Arc Result<(), ErrorStack> + Sync + Send>, } #[cfg(feature = "runtime")] impl MakeTlsConnector { + /// Creates a new connector. pub fn new(connector: SslConnector) -> MakeTlsConnector { MakeTlsConnector { connector, - config: Arc::new(|_| Ok(())), + config: Arc::new(|_, _| Ok(())), } } + /// Sets a callback used to apply per-connection configuration. 
+ /// + /// The callback is provided the domain name along with the `ConnectConfiguration`. pub fn set_callback(&mut self, f: F) where - F: Fn(&mut ConnectConfiguration) -> Result<(), ErrorStack> + 'static + Sync + Send, + F: Fn(&mut ConnectConfiguration, &str) -> Result<(), ErrorStack> + 'static + Sync + Send, { self.config = Arc::new(f); } @@ -55,17 +82,19 @@ where fn make_tls_connect(&mut self, domain: &str) -> Result { let mut ssl = self.connector.configure()?; - (self.config)(&mut ssl)?; + (self.config)(&mut ssl, domain)?; Ok(TlsConnector::new(ssl, domain)) } } +/// A `TlsConnect` implementation using the `openssl` crate. pub struct TlsConnector { ssl: ConnectConfiguration, domain: String, } impl TlsConnector { + /// Creates a new connector configured to connect to the specified domain. pub fn new(ssl: ConnectConfiguration, domain: &str) -> TlsConnector { TlsConnector { ssl, @@ -87,6 +116,7 @@ where } } +/// The future returned by `TlsConnector`. pub struct TlsConnectFuture(ConnectAsync); impl Future for TlsConnectFuture From 374fadb816d2f96bc14ff951560136685e00883c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 6 Mar 2019 21:05:04 -0800 Subject: [PATCH 149/819] Release tokio-postgres-openssl v0.1.0-rc.1 --- tokio-postgres-openssl/CHANGELOG.md | 9 +++++++++ tokio-postgres-openssl/Cargo.toml | 9 ++++++++- tokio-postgres-openssl/LICENSE-APACHE | 1 + tokio-postgres-openssl/LICENSE-MIT | 1 + tokio-postgres-openssl/src/lib.rs | 2 +- tokio-postgres/CHANGELOG.md | 4 ++-- 6 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 tokio-postgres-openssl/CHANGELOG.md create mode 120000 tokio-postgres-openssl/LICENSE-APACHE create mode 120000 tokio-postgres-openssl/LICENSE-MIT diff --git a/tokio-postgres-openssl/CHANGELOG.md b/tokio-postgres-openssl/CHANGELOG.md new file mode 100644 index 000000000..f0c20b8c5 --- /dev/null +++ b/tokio-postgres-openssl/CHANGELOG.md @@ -0,0 +1,9 @@ +# Change Log + +## [Unreleased] + +## v0.1.0-rc.1 - 2019-03-06 + +* Initial release. + +[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-openssl-v0.1.0-rc.1...master diff --git a/tokio-postgres-openssl/Cargo.toml b/tokio-postgres-openssl/Cargo.toml index afba5cdc9..88a140744 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/tokio-postgres-openssl/Cargo.toml @@ -1,8 +1,15 @@ [package] name = "tokio-postgres-openssl" -version = "0.1.0" +version = "0.1.0-rc.1" authors = ["Steven Fackler "] edition = "2018" +license = "MIT/Apache-2.0" +description = "TLS support for tokio-postgres via openssl" +repository = "https://github.com/sfackler/rust-postgres" +readme = "../README.md" + +[badges] +circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] diff --git a/tokio-postgres-openssl/LICENSE-APACHE b/tokio-postgres-openssl/LICENSE-APACHE new file mode 120000 index 000000000..b9e46b0fc --- /dev/null +++ b/tokio-postgres-openssl/LICENSE-APACHE @@ -0,0 +1 @@ +../tokio-postgres/LICENSE-APACHE \ No newline at end of file diff --git a/tokio-postgres-openssl/LICENSE-MIT b/tokio-postgres-openssl/LICENSE-MIT new file mode 120000 index 000000000..162832a42 --- /dev/null +++ b/tokio-postgres-openssl/LICENSE-MIT @@ -0,0 +1 @@ +../tokio-postgres/LICENSE-MIT \ No newline at end of file diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index 12b913301..f3c336cd6 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -17,7 +17,7 @@ //! //! // ... //!
``` - +#![doc(html_root_url = "https://docs.rs/tokio-postgres-openssl/0.1.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] use futures::{try_ready, Async, Future, Poll}; diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 375e1b27b..22b5197be 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -2,13 +2,13 @@ ## [Unreleased] -## [v0.4.0-rc.2] - 2019-05-03 +## [v0.4.0-rc.2] - 2019-03-05 ### Fixed * Fixed Cargo features to actually enable the functionality they claim to. -## [v0.4.0-rc.1] - 2019-05-03 +## [v0.4.0-rc.1] - 2019-03-05 ### Changed From e12902a2a5719380ffd578386746ca9ca620dfe5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 6 Mar 2019 21:13:18 -0800 Subject: [PATCH 150/819] Document tokio-postgres-native-tls --- tokio-postgres-native-tls/src/lib.rs | 34 +++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index 8ff31081f..46c103462 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -1,4 +1,29 @@ -#![warn(rust_2018_idioms, clippy::all)] +//! TLS support for `tokio-postgres` via `native-tls`. +//! +//! # Example +//! +//! ```no_run +//! use native_tls::{Certificate, TlsConnector}; +//! use tokio_postgres_native_tls::MakeTlsConnector; +//! use std::fs; +//! +//! let cert = fs::read("database_cert.pem").unwrap(); +//! let cert = Certificate::from_pem(&cert).unwrap(); +//! let connector = TlsConnector::builder() +//! .add_root_certificate(cert) +//! .build() +//! .unwrap(); +//! let connector = MakeTlsConnector::new(connector); +//! +//! let connect_future = tokio_postgres::connect( +//! "host=localhost user=postgres sslmode=require", +//! connector, +//! ); +//! +//! // ... +//! ``` + +#![warn(rust_2018_idioms, clippy::all, missing_docs)] use futures::{try_ready, Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -10,12 +35,16 @@ use tokio_tls::{Connect, TlsStream}; #[cfg(test)] mod test; +/// A `MakeTlsConnect` implementation using the `native-tls` crate. +/// +/// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] #[derive(Clone)] pub struct MakeTlsConnector(native_tls::TlsConnector); #[cfg(feature = "runtime")] impl MakeTlsConnector { + /// Creates a new connector. pub fn new(connector: native_tls::TlsConnector) -> MakeTlsConnector { MakeTlsConnector(connector) } @@ -35,12 +64,14 @@ where } } +/// A `TlsConnect` implementation using the `native-tls` crate. pub struct TlsConnector { connector: tokio_tls::TlsConnector, domain: String, } impl TlsConnector { + /// Creates a new connector configured to connect to the specified domain. pub fn new(connector: native_tls::TlsConnector, domain: &str) -> TlsConnector { TlsConnector { connector: tokio_tls::TlsConnector::from(connector), @@ -62,6 +93,7 @@ where } } +/// The future returned by `TlsConnector`.
pub struct TlsConnectFuture(Connect); impl Future for TlsConnectFuture From e434aadf04e565f580d686f6b6f6001265602fc4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 6 Mar 2019 21:16:03 -0800 Subject: [PATCH 151/819] Release tokio-postgres-native-tls v0.1.0-rc.1 --- tokio-postgres-native-tls/CHANGELOG.md | 9 +++++++++ tokio-postgres-native-tls/Cargo.toml | 9 ++++++++- tokio-postgres-native-tls/LICENSE-APACHE | 1 + tokio-postgres-native-tls/LICENSE-MIT | 1 + tokio-postgres-native-tls/src/lib.rs | 2 +- 5 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 tokio-postgres-native-tls/CHANGELOG.md create mode 120000 tokio-postgres-native-tls/LICENSE-APACHE create mode 120000 tokio-postgres-native-tls/LICENSE-MIT diff --git a/tokio-postgres-native-tls/CHANGELOG.md b/tokio-postgres-native-tls/CHANGELOG.md new file mode 100644 index 000000000..008c0eaf9 --- /dev/null +++ b/tokio-postgres-native-tls/CHANGELOG.md @@ -0,0 +1,9 @@ +# Change Log + +## [Unreleased] + +## v0.1.0-rc.1 - 2019-03-06 + +* Initial release. + +[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-native-tls-v0.1.0-rc.1...master diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml index 0c7e77b25..26f21f932 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/tokio-postgres-native-tls/Cargo.toml @@ -1,8 +1,15 @@ [package] name = "tokio-postgres-native-tls" -version = "0.1.0" +version = "0.1.0-rc.1" authors = ["Steven Fackler "] edition = "2018" +license = "MIT/Apache-2.0" +description = "TLS support for tokio-postgres via native-tls" +repository = "https://github.com/sfackler/rust-postgres" +readme = "../README.md" + +[badges] +circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] diff --git a/tokio-postgres-native-tls/LICENSE-APACHE b/tokio-postgres-native-tls/LICENSE-APACHE new file mode 120000 index 000000000..b9e46b0fc --- /dev/null +++ b/tokio-postgres-native-tls/LICENSE-APACHE @@ -0,0 +1 @@ +../tokio-postgres/LICENSE-APACHE \ No newline at end of file diff --git a/tokio-postgres-native-tls/LICENSE-MIT b/tokio-postgres-native-tls/LICENSE-MIT new file mode 120000 index 000000000..162832a42 --- /dev/null +++ b/tokio-postgres-native-tls/LICENSE-MIT @@ -0,0 +1 @@ +../tokio-postgres/LICENSE-MIT \ No newline at end of file diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index 46c103462..1b873beae 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -22,7 +22,7 @@ //! //! // ... //! 
``` - +#![doc(html_root_url = "https://docs.rs/tokio-postgres-native-tls/0.1.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] use futures::{try_ready, Async, Future, Poll}; From 3b31551f733ad73e4357e238dc2cdb8316de9f9e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 10 Mar 2019 16:32:28 -0700 Subject: [PATCH 152/819] Upgrade fallible-iterator --- postgres-protocol/Cargo.toml | 2 +- postgres/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/proto/prepare.rs | 2 +- tokio-postgres/src/proto/simple_query.rs | 2 +- tokio-postgres/src/types/mod.rs | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 9f274c025..993e3a27d 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -12,7 +12,7 @@ readme = "../README.md" base64 = "0.10" byteorder = "1.0" bytes = "0.4" -fallible-iterator = "0.1" +fallible-iterator = "0.2" generic-array = "0.12" hmac = "0.7" md5 = "0.6" diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index bff0f6fd7..211ae7218 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -18,7 +18,7 @@ runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] [dependencies] bytes = "0.4" -fallible-iterator = "0.1" +fallible-iterator = "0.2" futures = "0.1" tokio-postgres = { version = "0.4.0-rc.2", path = "../tokio-postgres", default-features = false } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 2914f8d75..bcdef6db1 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -33,7 +33,7 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] [dependencies] antidote = "1.0" bytes = "0.4" -fallible-iterator = "0.1.6" +fallible-iterator = "0.2" futures = "0.1.7" log = "0.4" percent-encoding = "1.0" diff --git a/tokio-postgres/src/proto/prepare.rs b/tokio-postgres/src/proto/prepare.rs index aa4739ec6..029bbb8a5 100644 --- a/tokio-postgres/src/proto/prepare.rs +++ b/tokio-postgres/src/proto/prepare.rs @@ -121,7 +121,7 @@ impl PollPrepare for Prepare { let columns = match message { Some(Message::RowDescription(body)) => body .fields() - .map(|f| (f.name().to_string(), f.type_oid())) + .map(|f| Ok((f.name().to_string(), f.type_oid()))) .collect() .map_err(Error::parse)?, Some(Message::NoData) => vec![], diff --git a/tokio-postgres/src/proto/simple_query.rs b/tokio-postgres/src/proto/simple_query.rs index 69d467b2c..71f458a84 100644 --- a/tokio-postgres/src/proto/simple_query.rs +++ b/tokio-postgres/src/proto/simple_query.rs @@ -77,7 +77,7 @@ impl Stream for SimpleQueryStream { Some(Message::RowDescription(body)) => { let columns = body .fields() - .map(|f| f.name().to_string()) + .map(|f| Ok(f.name().to_string())) .collect::>() .map_err(Error::parse)? .into(); diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 05cbab513..145a2530b 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -354,7 +354,7 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Vec { array .values() - .and_then(|v| T::from_sql_nullable(member_type, v)) + .map(|v| T::from_sql_nullable(member_type, v)) .collect() } @@ -436,7 +436,7 @@ where raw: &'a [u8], ) -> Result, S>, Box> { types::hstore_from_sql(raw)? 
- .map(|(k, v)| (k.to_owned(), v.map(str::to_owned))) + .map(|(k, v)| Ok((k.to_owned(), v.map(str::to_owned)))) .collect() } From 1cbf9a1b8dfe797513a3345c5bf585fbae288b1e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 10 Mar 2019 16:39:10 -0700 Subject: [PATCH 153/819] Fix build --- tokio-postgres/src/types/geo_types_04.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/types/geo_types_04.rs b/tokio-postgres/src/types/geo_types_04.rs index 9a46cc2ac..b3e7245c8 100644 --- a/tokio-postgres/src/types/geo_types_04.rs +++ b/tokio-postgres/src/types/geo_types_04.rs @@ -57,7 +57,7 @@ impl<'a> FromSql<'a> for LineString { let path = types::path_from_sql(raw)?; let points = path .points() - .map(|p| Coordinate { x: p.x(), y: p.y() }) + .map(|p| Ok(Coordinate { x: p.x(), y: p.y() })) .collect()?; Ok(LineString(points)) } From 2a80118e87bdd00a111fc28bada3e49918eb0c57 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 10 Mar 2019 17:27:28 -0700 Subject: [PATCH 154/819] Don't require use of iterators The common case is to simply want a vec of rows to work with, so make that the main API, wrapping the old version returning an iterator. --- postgres/src/client.rs | 29 ++++++-- postgres/src/lib.rs | 12 ++-- postgres/src/{query.rs => query_iter.rs} | 17 ++--- .../{query_portal.rs => query_portal_iter.rs} | 17 ++--- .../{simple_query.rs => simple_query_iter.rs} | 17 ++--- postgres/src/test.rs | 69 +++---------------- postgres/src/transaction.rs | 55 ++++++++++----- 7 files changed, 91 insertions(+), 125 deletions(-) rename postgres/src/{query.rs => query_iter.rs} (64%) rename postgres/src/{query_portal.rs => query_portal_iter.rs} (67%) rename postgres/src/{simple_query.rs => simple_query_iter.rs} (67%) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 26a0b788b..e1cc3bba1 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -3,13 +3,13 @@ use futures::{Async, Future, Poll, Stream}; use std::io::{self, Read}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; -use tokio_postgres::Error; #[cfg(feature = "runtime")] use tokio_postgres::Socket; +use tokio_postgres::{Error, Row, SimpleQueryMessage}; #[cfg(feature = "runtime")] use crate::Config; -use crate::{CopyOutReader, Query, SimpleQuery, Statement, ToStatement, Transaction}; +use crate::{CopyOutReader, QueryIter, SimpleQueryIter, Statement, ToStatement, Transaction}; pub struct Client(tokio_postgres::Client); @@ -46,12 +46,23 @@ impl Client { self.0.execute(&statement.0, params).wait() } - pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.query_iter(query, params)?.collect() + } + + pub fn query_iter( + &mut self, + query: &T, + params: &[&dyn ToSql], + ) -> Result, Error> where T: ?Sized + ToStatement, { let statement = query.__statement(self)?; - Ok(Query::new(self.0.query(&statement.0, params))) + Ok(QueryIter::new(self.0.query(&statement.0, params))) } pub fn copy_in( @@ -83,12 +94,16 @@ impl Client { CopyOutReader::new(stream) } - pub fn simple_query(&mut self, query: &str) -> Result, Error> { - Ok(SimpleQuery::new(self.0.simple_query(query))) + pub fn simple_query(&mut self, query: &str) -> Result, Error> { + self.simple_query_iter(query)?.collect() + } + + pub fn simple_query_iter(&mut self, query: &str) -> Result, Error> { + 
Ok(SimpleQueryIter::new(self.0.simple_query(query))) } pub fn transaction(&mut self) -> Result, Error> { - self.simple_query("BEGIN")?.count()?; + self.simple_query("BEGIN")?; Ok(Transaction::new(self)) } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index eefe5f938..f2f84f674 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -8,9 +8,9 @@ mod client; mod config; mod copy_out_reader; mod portal; -mod query; -mod query_portal; -mod simple_query; +mod query_iter; +mod query_portal_iter; +mod simple_query_iter; mod statement; mod to_statement; mod transaction; @@ -24,9 +24,9 @@ pub use crate::client::*; pub use crate::config::*; pub use crate::copy_out_reader::*; pub use crate::portal::*; -pub use crate::query::*; -pub use crate::query_portal::*; -pub use crate::simple_query::*; +pub use crate::query_iter::*; +pub use crate::query_portal_iter::*; +pub use crate::simple_query_iter::*; pub use crate::statement::*; pub use crate::to_statement::*; pub use crate::transaction::*; diff --git a/postgres/src/query.rs b/postgres/src/query_iter.rs similarity index 64% rename from postgres/src/query.rs rename to postgres/src/query_iter.rs index 4b2ae0b7b..0b99b16c4 100644 --- a/postgres/src/query.rs +++ b/postgres/src/query_iter.rs @@ -4,31 +4,26 @@ use std::marker::PhantomData; use tokio_postgres::impls; use tokio_postgres::{Error, Row}; -pub struct Query<'a> { +pub struct QueryIter<'a> { it: stream::Wait, _p: PhantomData<&'a mut ()>, } // no-op impl to extend the borrow until drop -impl<'a> Drop for Query<'a> { +impl<'a> Drop for QueryIter<'a> { fn drop(&mut self) {} } -impl<'a> Query<'a> { - pub(crate) fn new(stream: impls::Query) -> Query<'a> { - Query { +impl<'a> QueryIter<'a> { + pub(crate) fn new(stream: impls::Query) -> QueryIter<'a> { + QueryIter { it: stream.wait(), _p: PhantomData, } } - - /// A convenience API which collects the resulting rows into a `Vec` and returns them. - pub fn into_vec(self) -> Result, Error> { - self.collect() - } } -impl<'a> FallibleIterator for Query<'a> { +impl<'a> FallibleIterator for QueryIter<'a> { type Item = Row; type Error = Error; diff --git a/postgres/src/query_portal.rs b/postgres/src/query_portal_iter.rs similarity index 67% rename from postgres/src/query_portal.rs rename to postgres/src/query_portal_iter.rs index 824facc1c..641136dfb 100644 --- a/postgres/src/query_portal.rs +++ b/postgres/src/query_portal_iter.rs @@ -4,31 +4,26 @@ use std::marker::PhantomData; use tokio_postgres::impls; use tokio_postgres::{Error, Row}; -pub struct QueryPortal<'a> { +pub struct QueryPortalIter<'a> { it: stream::Wait, _p: PhantomData<&'a mut ()>, } // no-op impl to extend the borrow until drop -impl<'a> Drop for QueryPortal<'a> { +impl<'a> Drop for QueryPortalIter<'a> { fn drop(&mut self) {} } -impl<'a> QueryPortal<'a> { - pub(crate) fn new(stream: impls::QueryPortal) -> QueryPortal<'a> { - QueryPortal { +impl<'a> QueryPortalIter<'a> { + pub(crate) fn new(stream: impls::QueryPortal) -> QueryPortalIter<'a> { + QueryPortalIter { it: stream.wait(), _p: PhantomData, } } - - /// A convenience API which collects the resulting rows into a `Vec` and returns them. 
- pub fn into_vec(self) -> Result, Error> { - self.collect() - } } -impl<'a> FallibleIterator for QueryPortal<'a> { +impl<'a> FallibleIterator for QueryPortalIter<'a> { type Item = Row; type Error = Error; diff --git a/postgres/src/simple_query.rs b/postgres/src/simple_query_iter.rs similarity index 67% rename from postgres/src/simple_query.rs rename to postgres/src/simple_query_iter.rs index b3a158928..3c99bb92a 100644 --- a/postgres/src/simple_query.rs +++ b/postgres/src/simple_query_iter.rs @@ -4,31 +4,26 @@ use std::marker::PhantomData; use tokio_postgres::impls; use tokio_postgres::{Error, SimpleQueryMessage}; -pub struct SimpleQuery<'a> { +pub struct SimpleQueryIter<'a> { it: stream::Wait, _p: PhantomData<&'a mut ()>, } // no-op impl to extend borrow until drop -impl<'a> Drop for SimpleQuery<'a> { +impl<'a> Drop for SimpleQueryIter<'a> { fn drop(&mut self) {} } -impl<'a> SimpleQuery<'a> { - pub(crate) fn new(stream: impls::SimpleQuery) -> SimpleQuery<'a> { - SimpleQuery { +impl<'a> SimpleQueryIter<'a> { + pub(crate) fn new(stream: impls::SimpleQuery) -> SimpleQueryIter<'a> { + SimpleQueryIter { it: stream.wait(), _p: PhantomData, } } - - /// A convenience API which collects the resulting messages into a `Vec` and returns them. - pub fn into_vec(self) -> Result, Error> { - self.collect() - } } -impl<'a> FallibleIterator for SimpleQuery<'a> { +impl<'a> FallibleIterator for SimpleQueryIter<'a> { type Item = SimpleQueryMessage; type Error = Error; diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 9fa6bda02..0ff766cbb 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,4 +1,3 @@ -use fallible_iterator::FallibleIterator; use std::io::Read; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -21,11 +20,7 @@ fn query_prepared() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); let stmt = client.prepare("SELECT $1::TEXT").unwrap(); - let rows = client - .query(&stmt, &[&"hello"]) - .unwrap() - .into_vec() - .unwrap(); + let rows = client.query(&stmt, &[&"hello"]).unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "hello"); } @@ -34,11 +29,7 @@ fn query_prepared() { fn query_unprepared() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); - let rows = client - .query("SELECT $1::TEXT", &[&"hello"]) - .unwrap() - .into_vec() - .unwrap(); + let rows = client.query("SELECT $1::TEXT", &[&"hello"]).unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "hello"); } @@ -49,8 +40,6 @@ fn transaction_commit() { client .simple_query("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") - .unwrap() - .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -61,11 +50,7 @@ fn transaction_commit() { transaction.commit().unwrap(); - let rows = client - .query("SELECT * FROM foo", &[]) - .unwrap() - .into_vec() - .unwrap(); + let rows = client.query("SELECT * FROM foo", &[]).unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); } @@ -76,8 +61,6 @@ fn transaction_rollback() { client .simple_query("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") - .unwrap() - .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -88,11 +71,7 @@ fn transaction_rollback() { transaction.rollback().unwrap(); - let rows = client - .query("SELECT * FROM foo", &[]) - .unwrap() - .into_vec() - .unwrap(); + let rows = client.query("SELECT * FROM foo", &[]).unwrap(); assert_eq!(rows.len(), 0); } @@ 
-102,8 +81,6 @@ fn transaction_drop() { client .simple_query("CREATE TEMPORARY TABLE foo (id SERIAL PRIMARY KEY)") - .unwrap() - .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -114,11 +91,7 @@ fn transaction_drop() { drop(transaction); - let rows = client - .query("SELECT * FROM foo", &[]) - .unwrap() - .into_vec() - .unwrap(); + let rows = client.query("SELECT * FROM foo", &[]).unwrap(); assert_eq!(rows.len(), 0); } @@ -128,8 +101,6 @@ fn nested_transactions() { client .simple_query("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)") - .unwrap() - .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -148,8 +119,6 @@ fn nested_transactions() { let rows = transaction .query("SELECT id FROM foo ORDER BY id", &[]) - .unwrap() - .into_vec() .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -170,11 +139,7 @@ fn nested_transactions() { transaction3.commit().unwrap(); transaction.commit().unwrap(); - let rows = client - .query("SELECT id FROM foo ORDER BY id", &[]) - .unwrap() - .into_vec() - .unwrap(); + let rows = client.query("SELECT id FROM foo ORDER BY id", &[]).unwrap(); assert_eq!(rows.len(), 3); assert_eq!(rows[0].get::<_, i32>(0), 1); assert_eq!(rows[1].get::<_, i32>(0), 3); @@ -187,8 +152,6 @@ fn copy_in() { client .simple_query("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") - .unwrap() - .count() .unwrap(); client @@ -201,8 +164,6 @@ fn copy_in() { let rows = client .query("SELECT id, name FROM foo ORDER BY id", &[]) - .unwrap() - .into_vec() .unwrap(); assert_eq!(rows.len(), 2); @@ -221,8 +182,6 @@ fn copy_out() { "CREATE TEMPORARY TABLE foo (id INT, name TEXT); INSERT INTO foo (id, name) VALUES (1, 'steven'), (2, 'timothy');", ) - .unwrap() - .count() .unwrap(); let mut reader = client @@ -234,7 +193,7 @@ fn copy_out() { assert_eq!(s, "1\tsteven\n2\ttimothy\n"); - client.simple_query("SELECT 1").unwrap().count().unwrap(); + client.simple_query("SELECT 1").unwrap(); } #[test] @@ -246,8 +205,6 @@ fn portal() { "CREATE TEMPORARY TABLE foo (id INT); INSERT INTO foo (id) VALUES (1), (2), (3);", ) - .unwrap() - .count() .unwrap(); let mut transaction = client.transaction().unwrap(); @@ -256,20 +213,12 @@ fn portal() { .bind("SELECT * FROM foo ORDER BY id", &[]) .unwrap(); - let rows = transaction - .query_portal(&portal, 2) - .unwrap() - .into_vec() - .unwrap(); + let rows = transaction.query_portal(&portal, 2).unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); assert_eq!(rows[1].get::<_, i32>(0), 2); - let rows = transaction - .query_portal(&portal, 2) - .unwrap() - .into_vec() - .unwrap(); + let rows = transaction.query_portal(&portal, 2).unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 3); } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index e76ac028c..fb56b7184 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -2,10 +2,11 @@ use fallible_iterator::FallibleIterator; use futures::Future; use std::io::Read; use tokio_postgres::types::{ToSql, Type}; -use tokio_postgres::Error; +use tokio_postgres::{Error, Row, SimpleQueryMessage}; use crate::{ - Client, CopyOutReader, Portal, Query, QueryPortal, SimpleQuery, Statement, ToStatement, + Client, CopyOutReader, Portal, QueryIter, QueryPortalIter, SimpleQueryIter, Statement, + ToStatement, }; pub struct Transaction<'a> { @@ -33,13 +34,12 @@ impl<'a> Transaction<'a> { pub fn commit(mut self) -> Result<(), Error> { self.done = true; - let it = if self.depth 
== 0 { - self.client.simple_query("COMMIT")? + if self.depth == 0 { + self.client.simple_query("COMMIT")?; } else { self.client - .simple_query(&format!("RELEASE sp{}", self.depth))? - }; - it.count()?; + .simple_query(&format!("RELEASE sp{}", self.depth))?; + } Ok(()) } @@ -49,13 +49,12 @@ impl<'a> Transaction<'a> { } fn rollback_inner(&mut self) -> Result<(), Error> { - let it = if self.depth == 0 { - self.client.simple_query("ROLLBACK")? + if self.depth == 0 { + self.client.simple_query("ROLLBACK")?; } else { self.client - .simple_query(&format!("ROLLBACK TO sp{}", self.depth))? - }; - it.count()?; + .simple_query(&format!("ROLLBACK TO sp{}", self.depth))?; + } Ok(()) } @@ -74,13 +73,24 @@ impl<'a> Transaction<'a> { self.client.execute(query, params) } - pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where T: ?Sized + ToStatement, { self.client.query(query, params) } + pub fn query_iter( + &mut self, + query: &T, + params: &[&dyn ToSql], + ) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.client.query_iter(query, params) + } + pub fn bind(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where T: ?Sized + ToStatement, @@ -93,12 +103,16 @@ impl<'a> Transaction<'a> { .map(Portal) } - pub fn query_portal( + pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { + self.query_portal_iter(portal, max_rows)?.collect() + } + + pub fn query_portal_iter( &mut self, portal: &Portal, max_rows: i32, - ) -> Result, Error> { - Ok(QueryPortal::new( + ) -> Result, Error> { + Ok(QueryPortalIter::new( self.client.get_mut().query_portal(&portal.0, max_rows), )) } @@ -127,15 +141,18 @@ impl<'a> Transaction<'a> { self.client.copy_out(query, params) } - pub fn simple_query(&mut self, query: &str) -> Result, Error> { + pub fn simple_query(&mut self, query: &str) -> Result, Error> { self.client.simple_query(query) } + pub fn simple_query_iter(&mut self, query: &str) -> Result, Error> { + self.client.simple_query_iter(query) + } + pub fn transaction(&mut self) -> Result, Error> { let depth = self.depth + 1; self.client - .simple_query(&format!("SAVEPOINT sp{}", depth))? 
- .count()?; + .simple_query(&format!("SAVEPOINT sp{}", depth))?; Ok(Transaction { client: self.client, depth, From 60825d9bef06f595879c9f613506d75fb81e45d3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 24 Mar 2019 13:22:26 -0700 Subject: [PATCH 155/819] Allow custom executors --- postgres/src/config.rs | 94 ++++++++++++++++++++++++++++++++---------- 1 file changed, 73 insertions(+), 21 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 7979f0d13..5982e4fc3 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -1,8 +1,11 @@ +use futures::future::Executor; use futures::sync::oneshot; use futures::Future; use log::error; +use std::fmt; use std::path::Path; use std::str::FromStr; +use std::sync::Arc; use std::time::Duration; use tokio_postgres::config::{SslMode, TargetSessionAttrs}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; @@ -10,22 +13,36 @@ use tokio_postgres::{Error, Socket}; use crate::{Client, RUNTIME}; -#[derive(Debug, Clone, PartialEq)] -pub struct Config(tokio_postgres::Config); +#[derive(Clone)] +pub struct Config { + config: tokio_postgres::Config, + executor: Option + Send>>>>, +} + +impl fmt::Debug for Config { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Config") + .field("config", &self.config) + .finish() + } +} impl Default for Config { fn default() -> Config { - Config(tokio_postgres::Config::default()) + Config::new() } } impl Config { pub fn new() -> Config { - Config(tokio_postgres::Config::new()) + Config { + config: tokio_postgres::Config::new(), + executor: None, + } } pub fn user(&mut self, user: &str) -> &mut Config { - self.0.user(user); + self.config.user(user); self } @@ -33,32 +50,32 @@ impl Config { where T: AsRef<[u8]>, { - self.0.password(password); + self.config.password(password); self } pub fn dbname(&mut self, dbname: &str) -> &mut Config { - self.0.dbname(dbname); + self.config.dbname(dbname); self } pub fn options(&mut self, options: &str) -> &mut Config { - self.0.options(options); + self.config.options(options); self } pub fn application_name(&mut self, application_name: &str) -> &mut Config { - self.0.application_name(application_name); + self.config.application_name(application_name); self } pub fn ssl_mode(&mut self, ssl_mode: SslMode) -> &mut Config { - self.0.ssl_mode(ssl_mode); + self.config.ssl_mode(ssl_mode); self } pub fn host(&mut self, host: &str) -> &mut Config { - self.0.host(host); + self.config.host(host); self } @@ -67,27 +84,27 @@ impl Config { where T: AsRef, { - self.0.host_path(host); + self.config.host_path(host); self } pub fn port(&mut self, port: u16) -> &mut Config { - self.0.port(port); + self.config.port(port); self } pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { - self.0.connect_timeout(connect_timeout); + self.config.connect_timeout(connect_timeout); self } pub fn keepalives(&mut self, keepalives: bool) -> &mut Config { - self.0.keepalives(keepalives); + self.config.keepalives(keepalives); self } pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { - self.0.keepalives_idle(keepalives_idle); + self.config.keepalives_idle(keepalives_idle); self } @@ -95,7 +112,15 @@ impl Config { &mut self, target_session_attrs: TargetSessionAttrs, ) -> &mut Config { - self.0.target_session_attrs(target_session_attrs); + self.config.target_session_attrs(target_session_attrs); + self + } + + pub fn executor(&mut self, executor: E) -> &mut Config + where + E: Executor + Send>> + 
'static + Sync + Send, + { + self.executor = Some(Arc::new(executor)); self } @@ -106,19 +131,46 @@ impl Config { T::Stream: Send, >::Future: Send, { - let connect = self.0.connect(tls_mode); - let (client, connection) = oneshot::spawn(connect, &RUNTIME.executor()).wait()?; + let (tx, rx) = oneshot::channel(); + let connect = self + .config + .connect(tls_mode) + .then(|r| tx.send(r).map_err(|_| ())); + self.with_executor(|e| e.execute(Box::new(connect))) + .unwrap(); + let (client, connection) = rx.wait().unwrap()?; + let connection = connection.map_err(|e| error!("postgres connection error: {}", e)); - RUNTIME.executor().spawn(connection); + self.with_executor(|e| e.execute(Box::new(connection))) + .unwrap(); Ok(Client::from(client)) } + + fn with_executor(&self, f: F) -> T + where + F: FnOnce(&Executor + Send>>) -> T, + { + match &self.executor { + Some(e) => f(&**e), + None => f(&RUNTIME.executor()), + } + } } impl FromStr for Config { type Err = Error; fn from_str(s: &str) -> Result { - s.parse().map(Config) + s.parse::().map(Config::from) + } +} + +impl From for Config { + fn from(config: tokio_postgres::Config) -> Config { + Config { + config, + executor: None, + } } } From d52650a55c2703d45e0aa3a44c46cab8df780961 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 24 Mar 2019 13:32:29 -0700 Subject: [PATCH 156/819] Fixup warnings --- postgres/src/config.rs | 7 ++++--- postgres/src/lib.rs | 2 ++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 5982e4fc3..ccfb5f4ca 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -16,7 +16,8 @@ use crate::{Client, RUNTIME}; #[derive(Clone)] pub struct Config { config: tokio_postgres::Config, - executor: Option + Send>>>>, + #[allow(clippy::type_complexity)] + executor: Option + Send>>>>, } impl fmt::Debug for Config { @@ -118,7 +119,7 @@ impl Config { pub fn executor(&mut self, executor: E) -> &mut Config where - E: Executor + Send>> + 'static + Sync + Send, + E: Executor + Send>> + 'static + Sync + Send, { self.executor = Some(Arc::new(executor)); self @@ -149,7 +150,7 @@ impl Config { fn with_executor(&self, f: F) -> T where - F: FnOnce(&Executor + Send>>) -> T, + F: FnOnce(&dyn Executor + Send>>) -> T, { match &self.executor { Some(e) => f(&**e), diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index f2f84f674..c4a1ae5f4 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -1,3 +1,5 @@ +#![warn(clippy::all, rust_2018_idioms)] + #[cfg(feature = "runtime")] use lazy_static::lazy_static; #[cfg(feature = "runtime")] From 948274533fcc10c16dc9e00cd009dc5642118003 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 25 Mar 2019 20:02:24 -0700 Subject: [PATCH 157/819] Config should be Sync + Send --- postgres/src/config.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index ccfb5f4ca..e142b9d2d 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -13,11 +13,13 @@ use tokio_postgres::{Error, Socket}; use crate::{Client, RUNTIME}; +type DynExecutor = dyn Executor + Send>> + Sync + Send; + #[derive(Clone)] pub struct Config { config: tokio_postgres::Config, - #[allow(clippy::type_complexity)] - executor: Option + Send>>>>, + // this is an option since we don't want to boot up our default runtime unless we're actually going to use it. 
+ executor: Option>, } impl fmt::Debug for Config { From 52aa260f857e7c0392f04709779c1b470227fd5b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 25 Mar 2019 21:03:22 -0700 Subject: [PATCH 158/819] Reexports and config docs --- postgres/src/client.rs | 12 ++-- postgres/src/config.rs | 124 +++++++++++++++++++++++++++++++++++- postgres/src/lib.rs | 34 +++++----- postgres/src/portal.rs | 1 - postgres/src/statement.rs | 15 ----- postgres/src/transaction.rs | 8 +-- 6 files changed, 150 insertions(+), 44 deletions(-) delete mode 100644 postgres/src/portal.rs delete mode 100644 postgres/src/statement.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index e1cc3bba1..681b754e4 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -31,11 +31,11 @@ impl Client { } pub fn prepare(&mut self, query: &str) -> Result { - self.0.prepare(query).wait().map(Statement) + self.0.prepare(query).wait() } pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - self.0.prepare_typed(query, types).wait().map(Statement) + self.0.prepare_typed(query, types).wait() } pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result @@ -43,7 +43,7 @@ impl Client { T: ?Sized + ToStatement, { let statement = query.__statement(self)?; - self.0.execute(&statement.0, params).wait() + self.0.execute(&statement, params).wait() } pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> @@ -62,7 +62,7 @@ impl Client { T: ?Sized + ToStatement, { let statement = query.__statement(self)?; - Ok(QueryIter::new(self.0.query(&statement.0, params))) + Ok(QueryIter::new(self.0.query(&statement, params))) } pub fn copy_in( @@ -77,7 +77,7 @@ impl Client { { let statement = query.__statement(self)?; self.0 - .copy_in(&statement.0, params, CopyInStream(reader)) + .copy_in(&statement, params, CopyInStream(reader)) .wait() } @@ -90,7 +90,7 @@ impl Client { T: ?Sized + ToStatement, { let statement = query.__statement(self)?; - let stream = self.0.copy_out(&statement.0, params); + let stream = self.0.copy_out(&statement, params); CopyOutReader::new(stream) } diff --git a/postgres/src/config.rs b/postgres/src/config.rs index e142b9d2d..b04c03f87 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -1,3 +1,7 @@ +//! Connection configuration. +//! +//! Requires the `runtime` Cargo feature (enabled by default). + use futures::future::Executor; use futures::sync::oneshot; use futures::Future; @@ -7,14 +11,89 @@ use std::path::Path; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use tokio_postgres::config::{SslMode, TargetSessionAttrs}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; +#[doc(inline)] +use tokio_postgres::config::{SslMode, TargetSessionAttrs}; + use crate::{Client, RUNTIME}; type DynExecutor = dyn Executor + Send>> + Sync + Send; +/// Connection configuration. +/// +/// Configuration can be parsed from libpq-style connection strings. These strings come in two formats: +/// +/// # Key-Value +/// +/// This format consists of space-separated key-value pairs. Values which are either the empty string or contain +/// whitespace should be wrapped in `'`. `'` and `\` characters should be backslash-escaped. +/// +/// ## Keys +/// +/// * `user` - The username to authenticate with. Required. +/// * `password` - The password to authenticate with. +/// * `dbname` - The name of the database to connect to. Defaults to the username. 
+/// * `options` - Command line options used to configure the server. +/// * `application_name` - Sets the `application_name` parameter on the server. +/// * `sslmode` - Controls usage of TLS. If set to `disable`, TLS will not be used. If set to `prefer`, TLS will be used +/// if available, but not used otherwise. If set to `require`, TLS will be forced to be used. Defaults to `prefer`. +/// * `host` - The host to connect to. On Unix platforms, if the host starts with a `/` character it is treated as the +/// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts +/// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting +/// with the `connect` method. +/// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be +/// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if +/// omitted or the empty string. +/// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. Note that hostnames +/// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. +/// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. +/// This option is ignored when connecting with Unix sockets. Defaults to on. +/// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. +/// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. +/// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that +/// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server +/// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. +/// +/// ## Examples +/// +/// ```not_rust +/// host=localhost user=postgres connect_timeout=10 keepalives=0 +/// ``` +/// +/// ```not_rust +/// host=/var/lib/postgresql,localhost port=1234 user=postgres password='password with spaces' +/// ``` +/// +/// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write +/// ``` +/// +/// # Url +/// +/// This format resembles a URL with a scheme of either `postgres://` or `postgresql://`. All components are optional, +/// and the format accept query parameters for all of the key-value pairs described in the section above. Multiple +/// host/port pairs can be comma-separated. Unix socket paths in the host section of the URL should be percent-encoded, +/// as the path component of the URL specifies the database name. +/// +/// ## Examples +/// +/// ```not_rust +/// postgresql://user@localhost +/// ``` +/// +/// ```not_rust +/// postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10 +/// ``` +/// +/// ```not_rust +/// postgresql://user@host1:1234,host2,host3:5678?target_session_attrs=read-write +/// ``` +/// +/// ```not_rust +/// postgresql:///mydb?user=user&host=/var/lib/postgresql +/// ``` #[derive(Clone)] pub struct Config { config: tokio_postgres::Config, @@ -37,6 +116,7 @@ impl Default for Config { } impl Config { + /// Creates a new configuration. pub fn new() -> Config { Config { config: tokio_postgres::Config::new(), @@ -44,11 +124,15 @@ impl Config { } } + /// Sets the user to authenticate with. 
+ /// + /// Required. pub fn user(&mut self, user: &str) -> &mut Config { self.config.user(user); self } + /// Sets the password to authenticate with. pub fn password(&mut self, password: T) -> &mut Config where T: AsRef<[u8]>, @@ -57,31 +141,46 @@ impl Config { self } + /// Sets the name of the database to connect to. + /// + /// Defaults to the user. pub fn dbname(&mut self, dbname: &str) -> &mut Config { self.config.dbname(dbname); self } + /// Sets command line options used to configure the server. pub fn options(&mut self, options: &str) -> &mut Config { self.config.options(options); self } + /// Sets the value of the `application_name` runtime parameter. pub fn application_name(&mut self, application_name: &str) -> &mut Config { self.config.application_name(application_name); self } + /// Sets the SSL configuration. + /// + /// Defaults to `prefer`. pub fn ssl_mode(&mut self, ssl_mode: SslMode) -> &mut Config { self.config.ssl_mode(ssl_mode); self } + /// Adds a host to the configuration. + /// + /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix + /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. pub fn host(&mut self, host: &str) -> &mut Config { self.config.host(host); self } + /// Adds a Unix socket host to the configuration. + /// + /// Unlike `host`, this method allows non-UTF8 paths. #[cfg(unix)] pub fn host_path(&mut self, host: T) -> &mut Config where @@ -91,26 +190,45 @@ impl Config { self } + /// Adds a port to the configuration. + /// + /// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which + /// case the default of 5432 is used, a single port, in which it is used for all hosts, or the same number of ports + /// as hosts. pub fn port(&mut self, port: u16) -> &mut Config { self.config.port(port); self } + /// Sets the timeout applied to socket-level connection attempts. + /// + /// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each + /// host separately. Defaults to no limit. pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { self.config.connect_timeout(connect_timeout); self } + /// Controls the use of TCP keepalive. + /// + /// This is ignored for Unix domain socket connections. Defaults to `true`. pub fn keepalives(&mut self, keepalives: bool) -> &mut Config { self.config.keepalives(keepalives); self } + /// Sets the amount of idle time before a keepalive packet is sent on the connection. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { self.config.keepalives_idle(keepalives_idle); self } + /// Sets the requirements of the session. + /// + /// This can be used to connect to the primary server in a clustered database rather than one of the read-only + /// secondary servers. Defaults to `Any`. pub fn target_session_attrs( &mut self, target_session_attrs: TargetSessionAttrs, @@ -119,6 +237,9 @@ impl Config { self } + /// Sets the executor used to run the connection futures. + /// + /// Defaults to a postgres-specific tokio `Runtime`. pub fn executor(&mut self, executor: E) -> &mut Config where E: Executor + Send>> + 'static + Sync + Send, @@ -127,6 +248,7 @@ impl Config { self } + /// Opens a connection to a PostgreSQL database. 
pub fn connect(&self, tls_mode: T) -> Result where T: MakeTlsConnect + 'static + Send, diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index c4a1ae5f4..112b10821 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -5,15 +5,31 @@ use lazy_static::lazy_static; #[cfg(feature = "runtime")] use tokio::runtime::{self, Runtime}; +pub use tokio_postgres::{error, row, tls, types, Column, Portal, SimpleQueryMessage, Statement}; + +pub use crate::client::*; +#[cfg(feature = "runtime")] +pub use crate::config::Config; +pub use crate::copy_out_reader::*; +#[doc(no_inline)] +pub use crate::error::Error; +pub use crate::query_iter::*; +pub use crate::query_portal_iter::*; +#[doc(no_inline)] +pub use crate::row::{Row, SimpleQueryRow}; +pub use crate::simple_query_iter::*; +#[doc(no_inline)] +pub use crate::tls::NoTls; +pub use crate::to_statement::*; +pub use crate::transaction::*; + mod client; #[cfg(feature = "runtime")] -mod config; +pub mod config; mod copy_out_reader; -mod portal; mod query_iter; mod query_portal_iter; mod simple_query_iter; -mod statement; mod to_statement; mod transaction; @@ -21,18 +37,6 @@ mod transaction; #[cfg(test)] mod test; -pub use crate::client::*; -#[cfg(feature = "runtime")] -pub use crate::config::*; -pub use crate::copy_out_reader::*; -pub use crate::portal::*; -pub use crate::query_iter::*; -pub use crate::query_portal_iter::*; -pub use crate::simple_query_iter::*; -pub use crate::statement::*; -pub use crate::to_statement::*; -pub use crate::transaction::*; - #[cfg(feature = "runtime")] lazy_static! { static ref RUNTIME: Runtime = runtime::Builder::new() diff --git a/postgres/src/portal.rs b/postgres/src/portal.rs deleted file mode 100644 index cf22e5755..000000000 --- a/postgres/src/portal.rs +++ /dev/null @@ -1 +0,0 @@ -pub struct Portal(pub(crate) tokio_postgres::Portal); diff --git a/postgres/src/statement.rs b/postgres/src/statement.rs deleted file mode 100644 index b6abe2a51..000000000 --- a/postgres/src/statement.rs +++ /dev/null @@ -1,15 +0,0 @@ -use tokio_postgres::types::Type; -use tokio_postgres::Column; - -#[derive(Clone)] -pub struct Statement(pub(crate) tokio_postgres::Statement); - -impl Statement { - pub fn params(&self) -> &[Type] { - self.0.params() - } - - pub fn columns(&self) -> &[Column] { - self.0.columns() - } -} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index fb56b7184..f4f4a2648 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -96,11 +96,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { let statement = query.__statement(&mut self.client)?; - self.client - .get_mut() - .bind(&statement.0, params) - .wait() - .map(Portal) + self.client.get_mut().bind(&statement, params).wait() } pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { @@ -113,7 +109,7 @@ impl<'a> Transaction<'a> { max_rows: i32, ) -> Result, Error> { Ok(QueryPortalIter::new( - self.client.get_mut().query_portal(&portal.0, max_rows), + self.client.get_mut().query_portal(&portal, max_rows), )) } From 2c786587bbd2e506edf9de1d07c153544c0636eb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Mar 2019 20:20:15 -0700 Subject: [PATCH 159/819] Start on postgres docs --- postgres/src/client.rs | 143 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 681b754e4..95152c899 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -11,9 +11,15 @@ use 
tokio_postgres::{Error, Row, SimpleQueryMessage}; use crate::Config; use crate::{CopyOutReader, QueryIter, SimpleQueryIter, Statement, ToStatement, Transaction}; +/// A synchronous PostgreSQL client. +/// +/// This is a lightweight wrapper over the asynchronous tokio_postgres `Client`. pub struct Client(tokio_postgres::Client); impl Client { + /// A convenience function which parses a configuration string into a `Config` and then connects to the database. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn connect(params: &str, tls_mode: T) -> Result where @@ -25,19 +31,64 @@ impl Client { params.parse::()?.connect(tls_mode) } + /// Returns a new `Config` object which can be used to configure and connect to a database. + /// + /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] pub fn configure() -> Config { Config::new() } + /// Creates a new prepared statement. + /// + /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), + /// which are set when executed. Prepared statements can only be used with the connection that created them. pub fn prepare(&mut self, query: &str) -> Result { self.0.prepare(query).wait() } + /// Like `prepare`, but allows the types of query parameters to be explicitly specified. + /// + /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be + /// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`. pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { self.0.prepare_typed(query, types).wait() } + /// Executes a statement, returning the number of rows modified. + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned. + /// + /// The `query` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. + /// + /// # Example + /// + /// ```no_run + /// use postgres::{Client, NoTls}; + /// + /// # fn main() -> Result<(), postgres::Error> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let bar = 1i32; + /// let baz = true; + /// let rows_updated = client.execute( + /// "UPDATE foo SET bar = $1 WHERE baz = $2", + /// &[&bar, &baz], + /// )?; + /// + /// println!("{} rows updated", rows_updated); + /// # Ok(()) + /// # } + /// ``` pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where T: ?Sized + ToStatement, @@ -46,6 +97,37 @@ impl Client { self.0.execute(&statement, params).wait() } + /// Executes a statement, returning the resulting rows. + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `query` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. 
+    ///
+    /// The `query_iter` method can be used to avoid buffering all rows in memory at once.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the number of parameters provided does not match the number expected.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use postgres::{Client, NoTls};
+    ///
+    /// # fn main() -> Result<(), postgres::Error> {
+    /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
+    ///
+    /// let baz = true;
+    /// for row in client.query("SELECT foo FROM bar WHERE baz = $1", &[&baz])? {
+    ///     let foo: i32 = row.get("foo");
+    ///     println!("foo: {}", foo);
+    /// }
+    /// # Ok(())
+    /// # }
+    /// ```
     pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error>
     where
         T: ?Sized + ToStatement,
@@ -53,6 +135,31 @@ impl Client {
         self.query_iter(query, params)?.collect()
     }
 
+    /// Like `query`, except that it returns a fallible iterator over the resulting rows rather than buffering the
+    /// response in memory.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the number of parameters provided does not match the number expected.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use postgres::{Client, NoTls};
+    /// use fallible_iterator::FallibleIterator;
+    ///
+    /// # fn main() -> Result<(), postgres::Error> {
+    /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
+    ///
+    /// let baz = true;
+    /// let mut it = client.query_iter("SELECT foo FROM bar WHERE baz = $1", &[&baz])?;
+    ///
+    /// while let Some(row) = it.next()? {
+    ///     let foo: i32 = row.get("foo");
+    ///     println!("foo: {}", foo);
+    /// }
+    /// # Ok(())
+    /// # }
     pub fn query_iter(
         &mut self,
         query: &T,
@@ -65,6 +172,10 @@ impl Client {
         Ok(QueryIter::new(self.0.query(&statement, params)))
     }
 
+    /// Executes a `COPY FROM STDIN` statement, returning the number of rows created.
+    ///
+    /// The `query` argument can either be a `Statement`, or a raw query string. The data in the provided reader is
+    /// passed along to the server verbatim; it is the caller's responsibility to ensure it uses the proper format.
     pub fn copy_in(
         &mut self,
         query: &T,
@@ -81,6 +192,9 @@ impl Client {
             .wait()
     }
 
+    /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data.
+    ///
+    /// The `query` argument can either be a `Statement`, or a raw query string.
     pub fn copy_out(
         &mut self,
         query: &T,
@@ -94,14 +208,43 @@ impl Client {
         CopyOutReader::new(stream)
     }
 
+    /// Executes a sequence of SQL statements using the simple query protocol.
+    ///
+    /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
+    /// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings,
+    /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning the rows, this
+    /// method returns a sequence of an enum which indicates either the completion of one of the commands, or a row of
+    /// data. This preserves the framing between the separate statements in the request.
+    ///
+    /// This is a simple convenience method over `simple_query_iter`.
+    ///
+    /// # Warning
+    ///
+    /// Prepared statements should be used for any query which contains user-specified data, as they provide the
+    /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
+    /// them to this method!
     pub fn simple_query(&mut self, query: &str) -> Result, Error> {
         self.simple_query_iter(query)?.collect()
     }
 
+    /// Executes a sequence of SQL statements using the simple query protocol.
+    ///
+    /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
+    /// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings,
+    /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning the rows, this
+    /// method returns a sequence of an enum which indicates either the completion of one of the commands, or a row of
+    /// data. This preserves the framing between the separate statements in the request.
+    ///
+    /// # Warning
+    ///
+    /// Prepared statements should be used for any query which contains user-specified data, as they provide the
+    /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
+    /// them to this method!
     pub fn simple_query_iter(&mut self, query: &str) -> Result, Error> {
         Ok(SimpleQueryIter::new(self.0.simple_query(query)))
     }
 
+    /// Begins a new database transaction.
     pub fn transaction(&mut self) -> Result, Error> {
         self.simple_query("BEGIN")?;
         Ok(Transaction::new(self))
     }

From 7c9717d764a59ed1e075d819970b6506544e5c15 Mon Sep 17 00:00:00 2001
From: Steven Fackler 
Date: Thu, 28 Mar 2019 21:55:42 -0700
Subject: [PATCH 160/819] Finish Client docs

---
 postgres/src/client.rs | 140 ++++++++++++++++++++++++++++++++-------
 1 file changed, 117 insertions(+), 23 deletions(-)

diff --git a/postgres/src/client.rs b/postgres/src/client.rs
index 95152c899..64d14eeb3 100644
--- a/postgres/src/client.rs
+++ b/postgres/src/client.rs
@@ -19,7 +19,11 @@ pub struct Client(tokio_postgres::Client);
 impl Client {
     /// A convenience function which parses a configuration string into a `Config` and then connects to the database.
     ///
+    /// See the documentation for [`Config`] for information about the connection syntax.
+    ///
     /// Requires the `runtime` Cargo feature (enabled by default).
+    ///
+    /// [`Config`]: config/struct.Config.html
     #[cfg(feature = "runtime")]
     pub fn connect(params: &str, tls_mode: T) -> Result
     where
@@ -39,22 +43,6 @@ impl Client {
         Config::new()
     }
 
-    /// Creates a new prepared statement.
-    ///
-    /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc),
-    /// which are set when executed. Prepared statements can only be used with the connection that created them.
-    pub fn prepare(&mut self, query: &str) -> Result {
-        self.0.prepare(query).wait()
-    }
-
-    /// Like `prepare`, but allows the types of query parameters to be explicitly specified.
-    ///
-    /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be
-    /// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`.
-    pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result {
-        self.0.prepare_typed(query, types).wait()
-    }
-
     /// Executes a statement, returning the number of rows modified.
     ///
     /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list
@@ -172,10 +160,80 @@ impl Client {
         Ok(QueryIter::new(self.0.query(&statement, params)))
     }
 
+    /// Creates a new prepared statement.
+ /// + /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), + /// which are set when executed. Prepared statements can only be used with the connection that created them. + /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, NoTls}; + /// + /// # fn main() -> Result<(), postgres::Error> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let statement = client.prepare("SELECT name FROM people WHERE id = $1")?; + /// + /// for id in 0..10 { + /// let rows = client.query(&statement, &[&id])?; + /// let name: &str = rows[0].get(0); + /// println!("name: {}", name); + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn prepare(&mut self, query: &str) -> Result { + self.0.prepare(query).wait() + } + + /// Like `prepare`, but allows the types of query parameters to be explicitly specified. + /// + /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be + /// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`. + /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, NoTls}; + /// use postgres::types::Type; + /// + /// # fn main() -> Result<(), postgres::Error> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let statement = client.prepare_typed( + /// "SELECT name FROM people WHERE id = $1", + /// &[Type::INT8], + /// )?; + /// + /// for id in 0..10 { + /// let rows = client.query(&statement, &[&id])?; + /// let name: &str = rows[0].get(0); + /// println!("name: {}", name); + /// } + /// # Ok(()) + /// # } + pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { + self.0.prepare_typed(query, types).wait() + } + /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. /// /// The `query` argument can either be a `Statement`, or a raw query string. The data in the provided reader is /// passed along to the server verbatim; it is the caller's responsibility to ensure it uses the proper format. + /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, NoTls}; + /// + /// # fn main() -> Result<(), postgres::Error> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// client.copy_in("COPY people FROM stdin", &[], &mut "1\tjohn\n2\tjane\n".as_bytes())?; + /// # Ok(()) + /// # } + /// ``` pub fn copy_in( &mut self, query: &T, @@ -195,6 +253,22 @@ impl Client { /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data. /// /// The `query` argument can either be a `Statement`, or a raw query string. + /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, NoTls}; + /// use std::io::Read; + /// + /// # fn main() -> Result<(), Box> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let mut reader = client.copy_out("COPY people TO stdout", &[])?; + /// let mut buf = vec![]; + /// reader.read_to_end(&mut buf)?; + /// # Ok(()) + /// # } + /// ``` pub fn copy_out( &mut self, query: &T, @@ -227,13 +301,8 @@ impl Client { self.simple_query_iter(query)?.collect() } - /// Executes a sequence of SQL statements using the simple query protocol. - /// - /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that - /// point. 
The simple query protocol returns the values in rows as strings rather than in their binary encodings,
-    /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning the rows, this
-    /// method returns a sequence of an enum which indicates either the completion of one of the commands, or a row of
-    /// data. This preserves the framing between the separate statements in the request.
+    /// Like `simple_query`, except that it returns a fallible iterator over the resulting values rather than buffering
+    /// the response in memory.
     ///
     /// # Warning
     ///
     /// Prepared statements should be used for any query which contains user-specified data, as they provide the
     /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
     /// them to this method!
     pub fn simple_query_iter(&mut self, query: &str) -> Result, Error> {
         Ok(SimpleQueryIter::new(self.0.simple_query(query)))
     }
 
     /// Begins a new database transaction.
+    ///
+    /// The transaction will roll back by default - use the `commit` method to commit it.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use postgres::{Client, NoTls};
+    ///
+    /// # fn main() -> Result<(), postgres::Error> {
+    /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
+    ///
+    /// let mut transaction = client.transaction()?;
+    /// transaction.execute("UPDATE foo SET bar = 10", &[])?;
+    /// // ...
+    ///
+    /// transaction.commit()?;
+    /// # Ok(())
+    /// # }
+    /// ```
     pub fn transaction(&mut self) -> Result, Error> {
         self.simple_query("BEGIN")?;
         Ok(Transaction::new(self))
     }
 
+    /// Determines if the client's connection has already closed.
+    ///
+    /// If this returns `true`, the client is no longer usable.
     pub fn is_closed(&self) -> bool {
         self.0.is_closed()
     }
 
+    /// Returns a shared reference to the inner nonblocking client.
     pub fn get_ref(&self) -> &tokio_postgres::Client {
         &self.0
     }
 
+    /// Returns a mutable reference to the inner nonblocking client.
     pub fn get_mut(&mut self) -> &mut tokio_postgres::Client {
         &mut self.0
     }
 
+    /// Consumes the client, returning the inner nonblocking client.
     pub fn into_inner(self) -> tokio_postgres::Client {
         self.0
     }
 }

From aa3aedda56a5480a421c92645399e6ccbf36f0c7 Mon Sep 17 00:00:00 2001
From: Steven Fackler 
Date: Sat, 30 Mar 2019 20:57:12 -0700
Subject: [PATCH 161/819] More docs

---
 postgres/src/copy_out_reader.rs   |  1 +
 postgres/src/query_iter.rs        |  1 +
 postgres/src/query_portal_iter.rs |  1 +
 postgres/src/simple_query_iter.rs |  1 +
 postgres/src/to_statement.rs      |  6 ++++++
 postgres/src/transaction.rs       | 34 +++++++++++++++++++++++++++++++
 6 files changed, 44 insertions(+)

diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs
index 0bea15be1..b5a92aac9 100644
--- a/postgres/src/copy_out_reader.rs
+++ b/postgres/src/copy_out_reader.rs
@@ -5,6 +5,7 @@ use std::marker::PhantomData;
 use tokio_postgres::impls;
 use tokio_postgres::Error;
 
+// The reader returned by the `copy_out` method.
 pub struct CopyOutReader<'a> {
     it: stream::Wait,
     cur: Cursor,
diff --git a/postgres/src/query_iter.rs b/postgres/src/query_iter.rs
index 0b99b16c4..8f9a50598 100644
--- a/postgres/src/query_iter.rs
+++ b/postgres/src/query_iter.rs
@@ -4,6 +4,7 @@ use std::marker::PhantomData;
 use tokio_postgres::impls;
 use tokio_postgres::{Error, Row};
 
+/// The iterator returned by the `query_iter` method.
 pub struct QueryIter<'a> {
     it: stream::Wait,
     _p: PhantomData<&'a mut ()>,
diff --git a/postgres/src/query_portal_iter.rs b/postgres/src/query_portal_iter.rs
index 641136dfb..8fab34865 100644
--- a/postgres/src/query_portal_iter.rs
+++ b/postgres/src/query_portal_iter.rs
@@ -4,6 +4,7 @@ use std::marker::PhantomData;
 use tokio_postgres::impls;
 use tokio_postgres::{Error, Row};
 
+/// The iterator returned by the `query_portal_iter` method.
pub struct QueryPortalIter<'a> { it: stream::Wait, _p: PhantomData<&'a mut ()>, diff --git a/postgres/src/simple_query_iter.rs b/postgres/src/simple_query_iter.rs index 3c99bb92a..3053cd30b 100644 --- a/postgres/src/simple_query_iter.rs +++ b/postgres/src/simple_query_iter.rs @@ -4,6 +4,7 @@ use std::marker::PhantomData; use tokio_postgres::impls; use tokio_postgres::{Error, SimpleQueryMessage}; +/// The iterator returned by the `simple_query_iter` method. pub struct SimpleQueryIter<'a> { it: stream::Wait, _p: PhantomData<&'a mut ()>, diff --git a/postgres/src/to_statement.rs b/postgres/src/to_statement.rs index 2f24d2035..5cbe56a38 100644 --- a/postgres/src/to_statement.rs +++ b/postgres/src/to_statement.rs @@ -6,6 +6,12 @@ mod sealed { pub trait Sealed {} } +/// A trait abstracting over prepared and unprepared statements. +/// +/// Many methods are generic over this bound, so that they support both a raw query string as well as a statement which +/// was prepared previously. +/// +/// This trait is "sealed" and cannot be implemented by anything outside this crate. pub trait ToStatement: sealed::Sealed { #[doc(hidden)] fn __statement(&self, client: &mut Client) -> Result; diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index f4f4a2648..67b3def39 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -9,6 +9,10 @@ use crate::{ ToStatement, }; +/// A representation of a PostgreSQL database transaction. +/// +/// Transactions will implicitly roll back by default when dropped. Use the `commit` method to commit the changes made +/// in the transaction. Transactions can be nested, with inner transactions implemented via safepoints. pub struct Transaction<'a> { client: &'a mut Client, depth: u32, @@ -32,6 +36,7 @@ impl<'a> Transaction<'a> { } } + /// Consumes the transaction, committing all changes made within it. pub fn commit(mut self) -> Result<(), Error> { self.done = true; if self.depth == 0 { @@ -43,6 +48,9 @@ impl<'a> Transaction<'a> { Ok(()) } + /// Rolls the transaction back, discarding all changes made within it. + /// + /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. pub fn rollback(mut self) -> Result<(), Error> { self.done = true; self.rollback_inner() @@ -58,14 +66,17 @@ impl<'a> Transaction<'a> { Ok(()) } + //// Like `Client::prepare`. pub fn prepare(&mut self, query: &str) -> Result { self.client.prepare(query) } + //// Like `Client::prepare_typed`. pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { self.client.prepare_typed(query, types) } + //// Like `Client::execute`. pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where T: ?Sized + ToStatement, @@ -73,6 +84,7 @@ impl<'a> Transaction<'a> { self.client.execute(query, params) } + //// Like `Client::query`. pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where T: ?Sized + ToStatement, @@ -80,6 +92,7 @@ impl<'a> Transaction<'a> { self.client.query(query, params) } + //// Like `Client::query_iter`. pub fn query_iter( &mut self, query: &T, @@ -91,6 +104,16 @@ impl<'a> Transaction<'a> { self.client.query_iter(query, params) } + /// Binds parameters to a statement, creating a "portal". + /// + /// Portals can be used with the `query_portal` method to page through the results of a query without being forced + /// to consume them all immediately. + /// + /// Portals are automatically closed when the transaction they were created in is closed. 
+ /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. pub fn bind(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where T: ?Sized + ToStatement, @@ -99,10 +122,16 @@ impl<'a> Transaction<'a> { self.client.get_mut().bind(&statement, params).wait() } + /// Continues execution of a portal, returning the next set of rows. + /// + /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to + /// `query_portal`. If the requested number is negative or 0, all remaining rows will be returned. pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { self.query_portal_iter(portal, max_rows)?.collect() } + /// Like `query_portal`, except that it returns a fallible iterator over the resulting rows rather than buffering + /// the entire response in memory. pub fn query_portal_iter( &mut self, portal: &Portal, @@ -113,6 +142,7 @@ impl<'a> Transaction<'a> { )) } + /// Like `Client::copy_in`. pub fn copy_in( &mut self, query: &T, @@ -126,6 +156,7 @@ impl<'a> Transaction<'a> { self.client.copy_in(query, params, reader) } + /// Like `Client::copy_out`. pub fn copy_out( &mut self, query: &T, @@ -137,14 +168,17 @@ impl<'a> Transaction<'a> { self.client.copy_out(query, params) } + /// Like `Client::simple_query`. pub fn simple_query(&mut self, query: &str) -> Result, Error> { self.client.simple_query(query) } + /// Like `Client::simple_query_iter`. pub fn simple_query_iter(&mut self, query: &str) -> Result, Error> { self.client.simple_query_iter(query) } + /// Like `Client::transaction`. pub fn transaction(&mut self) -> Result, Error> { let depth = self.depth + 1; self.client From 16021d2985ea5765c8c5714eea5c76759db6e3ef Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Mar 2019 20:58:01 -0700 Subject: [PATCH 162/819] Fix docs --- postgres/src/copy_out_reader.rs | 2 +- postgres/src/lib.rs | 3 ++- postgres/src/transaction.rs | 10 +++++----- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index b5a92aac9..ff0c30a24 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -5,7 +5,7 @@ use std::marker::PhantomData; use tokio_postgres::impls; use tokio_postgres::Error; -// The reader returned by the `copy_out` method. +/// The reader returned by the `copy_out` method. pub struct CopyOutReader<'a> { it: stream::Wait, cur: Cursor, diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 112b10821..1555cb058 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -1,4 +1,5 @@ -#![warn(clippy::all, rust_2018_idioms)] +//! +#![warn(clippy::all, rust_2018_idioms, missing_docs)] #[cfg(feature = "runtime")] use lazy_static::lazy_static; diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 67b3def39..8850baee8 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -66,17 +66,17 @@ impl<'a> Transaction<'a> { Ok(()) } - //// Like `Client::prepare`. + /// Like `Client::prepare`. pub fn prepare(&mut self, query: &str) -> Result { self.client.prepare(query) } - //// Like `Client::prepare_typed`. + /// Like `Client::prepare_typed`. pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { self.client.prepare_typed(query, types) } - //// Like `Client::execute`. + /// Like `Client::execute`. 
pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result where T: ?Sized + ToStatement, @@ -84,7 +84,7 @@ impl<'a> Transaction<'a> { self.client.execute(query, params) } - //// Like `Client::query`. + /// Like `Client::query`. pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> where T: ?Sized + ToStatement, @@ -92,7 +92,7 @@ impl<'a> Transaction<'a> { self.client.query(query, params) } - //// Like `Client::query_iter`. + /// Like `Client::query_iter`. pub fn query_iter( &mut self, query: &T, From aaaf8247ec4c75de9a999f19be0b11f63832834f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 31 Mar 2019 21:48:32 -0700 Subject: [PATCH 163/819] Crate level docs --- postgres/src/lib.rs | 53 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 1555cb058..41d85ad52 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -1,4 +1,57 @@ +//! A synchronous client for the PostgreSQL database. //! +//! # Example +//! +//! ```no_run +//! use postgres::{Client, NoTls}; +//! +//! # fn main() -> Result<(), postgres::Error> { +//! let mut client = Client::connect("host=localhost user=postgres", NoTls)?; +//! +//! client.simple_query(" +//! CREATE TABLE person ( +//! id SERIAL PRIMARY KEY, +//! name TEXT NOT NULL, +//! data BYTEA +//! ) +//! ")?; +//! +//! let name = "Ferris"; +//! let data = None::<&[u8]>; +//! client.execute( +//! "INSERT INTO person (name, data) VALUES ($1, $2)", +//! &[&name, &data], +//! )?; +//! +//! for row in client.query("SELECT id, name, data FROM person", &[])? { +//! let id: i32 = row.get(0); +//! let name: &str = row.get(1); +//! let data: Option<&[u8]> = row.get(2); +//! +//! println!("found person: {} {} {:?}", id, name, data); +//! } +//! # Ok(()) +//! # } +//! ``` +//! +//! # Implementation +//! +//! This crate is a lightweight wrapper over tokio-postgres. The `tokio_postgres::Connection` is spawned onto an +//! executor, and the `tokio_postgres::Client` is wrapped in the `postgres::Client`, which simply waits on the futures +//! the nonblocking client creates. +//! +//! # Runtime +//! +//! A client can be constructed directly from a `tokio-postgres` client via a `From` implementation, but the `runtime` +//! Cargo feature (enabled by default) provides a more convenient interface. By default, connections will be spawned +//! onto a static tokio `Runtime`, but a custom `Executor` can also be used instead. +//! +//! # SSL/TLS support +//! +//! TLS support is implemented via external libraries. `Client::connect` and `Config::connect` take a TLS implementation +//! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the +//! `tokio-postgres-openssl` and `tokio-postgres-native-tls` crates provide implementations backed by the `postgres` and +//! `native-tls` crates, respectively. 
#![warn(clippy::all, rust_2018_idioms, missing_docs)] #[cfg(feature = "runtime")] From 49c22a8a3936a86181facd6d95165cf4a2f2860f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 1 Apr 2019 18:51:17 -0700 Subject: [PATCH 164/819] Add postgres examples to TLS crates --- tokio-postgres-native-tls/Cargo.toml | 1 + tokio-postgres-native-tls/src/lib.rs | 35 +++++++++++++++++++++++----- tokio-postgres-openssl/Cargo.toml | 1 + tokio-postgres-openssl/src/lib.rs | 30 ++++++++++++++++++++---- 4 files changed, 57 insertions(+), 10 deletions(-) diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml index 26f21f932..ec1da558a 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/tokio-postgres-native-tls/Cargo.toml @@ -24,3 +24,4 @@ tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-f [dev-dependencies] tokio = "0.1.7" +postgres = { version = "0.1.0", path = "../postgres" } diff --git a/tokio-postgres-native-tls/src/lib.rs b/tokio-postgres-native-tls/src/lib.rs index 1b873beae..b0fcf4b0d 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/tokio-postgres-native-tls/src/lib.rs @@ -1,18 +1,18 @@ -//! TLS support for `tokio-postgres` via `native-tls. +//! TLS support for `tokio-postgres` and `postgres` via `native-tls. //! -//! # Example +//! # Examples //! //! ```no_run //! use native_tls::{Certificate, TlsConnector}; //! use tokio_postgres_native_tls::MakeTlsConnector; //! use std::fs; //! -//! let cert = fs::read("database_cert.pem").unwrap(); -//! let cert = Certificate::from_pem(&cert).unwrap(); +//! # fn main() -> Result<(), Box> { +//! let cert = fs::read("database_cert.pem")?; +//! let cert = Certificate::from_pem(&cert)?; //! let connector = TlsConnector::builder() //! .add_root_certificate(cert) -//! .build() -//! .unwrap(); +//! .build()?; //! let connector = MakeTlsConnector::new(connector); //! //! let connect_future = tokio_postgres::connect( @@ -21,6 +21,29 @@ //! ); //! //! // ... +//! # Ok(()) +//! # } +//! ``` +//! +//! ```no_run +//! use native_tls::{Certificate, TlsConnector}; +//! use tokio_postgres_native_tls::MakeTlsConnector; +//! use std::fs; +//! +//! # fn main() -> Result<(), Box> { +//! let cert = fs::read("database_cert.pem")?; +//! let cert = Certificate::from_pem(&cert)?; +//! let connector = TlsConnector::builder() +//! .add_root_certificate(cert) +//! .build()?; +//! let connector = MakeTlsConnector::new(connector); +//! +//! let mut client = postgres::Client::connect( +//! "host=localhost user=postgres sslmode=require", +//! connector, +//! )?; +//! # Ok(()) +//! # } //! ``` #![doc(html_root_url = "https://docs.rs/tokio-postgres-native-tls/0.1.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] diff --git a/tokio-postgres-openssl/Cargo.toml b/tokio-postgres-openssl/Cargo.toml index 88a140744..0ba6a8639 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/tokio-postgres-openssl/Cargo.toml @@ -24,3 +24,4 @@ tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-f [dev-dependencies] tokio = "0.1.7" +postgres = { version = "0.1", path = "../postgres" } diff --git a/tokio-postgres-openssl/src/lib.rs b/tokio-postgres-openssl/src/lib.rs index f3c336cd6..220987417 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/tokio-postgres-openssl/src/lib.rs @@ -1,13 +1,14 @@ -//! TLS support for `tokio-postgres` via `openssl`. +//! TLS support for `tokio-postgres` and `postgres` via `openssl`. //! -//! # Example +//! # Examples //! //! ```no_run //! 
use openssl::ssl::{SslConnector, SslMethod}; //! use tokio_postgres_openssl::MakeTlsConnector; //! -//! let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); -//! builder.set_ca_file("database_cert.pem").unwrap(); +//! # fn main() -> Result<(), Box> { +//! let mut builder = SslConnector::builder(SslMethod::tls())?; +//! builder.set_ca_file("database_cert.pem")?; //! let connector = MakeTlsConnector::new(builder.build()); //! //! let connect_future = tokio_postgres::connect( @@ -16,6 +17,27 @@ //! ); //! //! // ... +//! # Ok(()) +//! # } +//! ``` +//! +//! ```no_run +//! use openssl::ssl::{SslConnector, SslMethod}; +//! use tokio_postgres_openssl::MakeTlsConnector; +//! +//! # fn main() -> Result<(), Box> { +//! let mut builder = SslConnector::builder(SslMethod::tls())?; +//! builder.set_ca_file("database_cert.pem")?; +//! let connector = MakeTlsConnector::new(builder.build()); +//! +//! let mut client = postgres::Client::connect( +//! "host=localhost user=postgres sslmode=require", +//! connector, +//! )?; +//! +//! // ... +//! # Ok(()) +//! # } //! ``` #![doc(html_root_url = "https://docs.rs/tokio-postgres-openssl/0.1.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] From 6c0ec6a6bf2092cd1962fd8a5e069de31112f5a4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 1 Apr 2019 21:56:25 -0700 Subject: [PATCH 165/819] TLS docs for tokio-postgres --- postgres/src/lib.rs | 2 +- tokio-postgres/src/lib.rs | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 41d85ad52..f643db6ed 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -50,7 +50,7 @@ //! //! TLS support is implemented via external libraries. `Client::connect` and `Config::connect` take a TLS implementation //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the -//! `tokio-postgres-openssl` and `tokio-postgres-native-tls` crates provide implementations backed by the `postgres` and +//! `tokio-postgres-openssl` and `tokio-postgres-native-tls` crates provide implementations backed by the `openssl` and //! `native-tls` crates, respectively. #![warn(clippy::all, rust_2018_idioms, missing_docs)] diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index bb1304c53..060e0c5d9 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -101,6 +101,13 @@ //! The client works with arbitrary `AsyncRead + AsyncWrite` streams. Convenience APIs are provided to handle the //! connection process, but these are gated by the `runtime` Cargo feature, which is enabled by default. If disabled, //! all dependence on the tokio runtime is removed. +//! +//! # SSL/TLS support +//! +//! TLS support is implemented via external libraries. `Client::connect` and `Config::connect` take a TLS implementation +//! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the +//! `tokio-postgres-openssl` and `tokio-postgres-native-tls` crates provide implementations backed by the `openssl` and +//! `native-tls` crates, respectively. 
#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.4.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] From fd3f3feafc039bda0030c1546cf2088348f3cdbd Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 3 Apr 2019 20:50:41 -0700 Subject: [PATCH 166/819] Move types tests to their own file --- .../src/{types.rs => types/mod.rs} | 163 +----------------- postgres-protocol/src/types/test.rs | 156 +++++++++++++++++ 2 files changed, 159 insertions(+), 160 deletions(-) rename postgres-protocol/src/{types.rs => types/mod.rs} (85%) create mode 100644 postgres-protocol/src/types/test.rs diff --git a/postgres-protocol/src/types.rs b/postgres-protocol/src/types/mod.rs similarity index 85% rename from postgres-protocol/src/types.rs rename to postgres-protocol/src/types/mod.rs index 7fac2b9d8..46000c407 100644 --- a/postgres-protocol/src/types.rs +++ b/postgres-protocol/src/types/mod.rs @@ -7,6 +7,9 @@ use std::str; use crate::{write_nullable, FromUsize, IsNull, Oid}; +#[cfg(test)] +mod test; + const RANGE_UPPER_UNBOUNDED: u8 = 0b0001_0000; const RANGE_LOWER_UNBOUNDED: u8 = 0b0000_1000; const RANGE_UPPER_INCLUSIVE: u8 = 0b0000_0100; @@ -953,163 +956,3 @@ impl<'a> FallibleIterator for PathPoints<'a> { (len, Some(len)) } } - -#[cfg(test)] -mod test { - use fallible_iterator::FallibleIterator; - use std::collections::HashMap; - - use super::*; - use crate::IsNull; - - #[test] - fn bool() { - let mut buf = vec![]; - bool_to_sql(true, &mut buf); - assert_eq!(bool_from_sql(&buf).unwrap(), true); - - let mut buf = vec![]; - bool_to_sql(false, &mut buf); - assert_eq!(bool_from_sql(&buf).unwrap(), false); - } - - #[test] - fn int2() { - let mut buf = vec![]; - int2_to_sql(0x0102, &mut buf); - assert_eq!(int2_from_sql(&buf).unwrap(), 0x0102); - } - - #[test] - fn int4() { - let mut buf = vec![]; - int4_to_sql(0x0102_0304, &mut buf); - assert_eq!(int4_from_sql(&buf).unwrap(), 0x0102_0304); - } - - #[test] - fn int8() { - let mut buf = vec![]; - int8_to_sql(0x0102_0304_0506_0708, &mut buf); - assert_eq!(int8_from_sql(&buf).unwrap(), 0x0102_0304_0506_0708); - } - - #[test] - #[allow(clippy::float_cmp)] - fn float4() { - let mut buf = vec![]; - float4_to_sql(10343.95, &mut buf); - assert_eq!(float4_from_sql(&buf).unwrap(), 10343.95); - } - - #[test] - #[allow(clippy::float_cmp)] - fn float8() { - let mut buf = vec![]; - float8_to_sql(10343.95, &mut buf); - assert_eq!(float8_from_sql(&buf).unwrap(), 10343.95); - } - - #[test] - fn hstore() { - let mut map = HashMap::new(); - map.insert("hello", Some("world")); - map.insert("hola", None); - - let mut buf = vec![]; - hstore_to_sql(map.iter().map(|(&k, &v)| (k, v)), &mut buf).unwrap(); - assert_eq!( - hstore_from_sql(&buf) - .unwrap() - .collect::>() - .unwrap(), - map - ); - } - - #[test] - fn varbit() { - let len = 12; - let bits = [0b0010_1011, 0b0000_1111]; - - let mut buf = vec![]; - varbit_to_sql(len, bits.iter().cloned(), &mut buf).unwrap(); - let out = varbit_from_sql(&buf).unwrap(); - assert_eq!(out.len(), len); - assert_eq!(out.bytes(), bits); - } - - #[test] - fn array() { - let dimensions = [ - ArrayDimension { - len: 1, - lower_bound: 10, - }, - ArrayDimension { - len: 2, - lower_bound: 0, - }, - ]; - let values = [None, Some(&b"hello"[..])]; - - let mut buf = vec![]; - array_to_sql( - dimensions.iter().cloned(), - 10, - values.iter().cloned(), - |v, buf| match v { - Some(v) => { - buf.extend_from_slice(v); - Ok(IsNull::No) - } - None => Ok(IsNull::Yes), - }, - &mut buf, - ) - .unwrap(); - - let array = array_from_sql(&buf).unwrap(); - 
assert_eq!(array.has_nulls(), true); - assert_eq!(array.element_type(), 10); - assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); - assert_eq!(array.values().collect::>().unwrap(), values); - } - - #[test] - fn non_null_array() { - let dimensions = [ - ArrayDimension { - len: 1, - lower_bound: 10, - }, - ArrayDimension { - len: 2, - lower_bound: 0, - }, - ]; - let values = [Some(&b"hola"[..]), Some(&b"hello"[..])]; - - let mut buf = vec![]; - array_to_sql( - dimensions.iter().cloned(), - 10, - values.iter().cloned(), - |v, buf| match v { - Some(v) => { - buf.extend_from_slice(v); - Ok(IsNull::No) - } - None => Ok(IsNull::Yes), - }, - &mut buf, - ) - .unwrap(); - - let array = array_from_sql(&buf).unwrap(); - assert_eq!(array.has_nulls(), false); - assert_eq!(array.element_type(), 10); - assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); - assert_eq!(array.values().collect::>().unwrap(), values); - } -} diff --git a/postgres-protocol/src/types/test.rs b/postgres-protocol/src/types/test.rs new file mode 100644 index 000000000..7d2563555 --- /dev/null +++ b/postgres-protocol/src/types/test.rs @@ -0,0 +1,156 @@ +use fallible_iterator::FallibleIterator; +use std::collections::HashMap; + +use super::*; +use crate::IsNull; + +#[test] +fn bool() { + let mut buf = vec![]; + bool_to_sql(true, &mut buf); + assert_eq!(bool_from_sql(&buf).unwrap(), true); + + let mut buf = vec![]; + bool_to_sql(false, &mut buf); + assert_eq!(bool_from_sql(&buf).unwrap(), false); +} + +#[test] +fn int2() { + let mut buf = vec![]; + int2_to_sql(0x0102, &mut buf); + assert_eq!(int2_from_sql(&buf).unwrap(), 0x0102); +} + +#[test] +fn int4() { + let mut buf = vec![]; + int4_to_sql(0x0102_0304, &mut buf); + assert_eq!(int4_from_sql(&buf).unwrap(), 0x0102_0304); +} + +#[test] +fn int8() { + let mut buf = vec![]; + int8_to_sql(0x0102_0304_0506_0708, &mut buf); + assert_eq!(int8_from_sql(&buf).unwrap(), 0x0102_0304_0506_0708); +} + +#[test] +#[allow(clippy::float_cmp)] +fn float4() { + let mut buf = vec![]; + float4_to_sql(10343.95, &mut buf); + assert_eq!(float4_from_sql(&buf).unwrap(), 10343.95); +} + +#[test] +#[allow(clippy::float_cmp)] +fn float8() { + let mut buf = vec![]; + float8_to_sql(10343.95, &mut buf); + assert_eq!(float8_from_sql(&buf).unwrap(), 10343.95); +} + +#[test] +fn hstore() { + let mut map = HashMap::new(); + map.insert("hello", Some("world")); + map.insert("hola", None); + + let mut buf = vec![]; + hstore_to_sql(map.iter().map(|(&k, &v)| (k, v)), &mut buf).unwrap(); + assert_eq!( + hstore_from_sql(&buf) + .unwrap() + .collect::>() + .unwrap(), + map + ); +} + +#[test] +fn varbit() { + let len = 12; + let bits = [0b0010_1011, 0b0000_1111]; + + let mut buf = vec![]; + varbit_to_sql(len, bits.iter().cloned(), &mut buf).unwrap(); + let out = varbit_from_sql(&buf).unwrap(); + assert_eq!(out.len(), len); + assert_eq!(out.bytes(), bits); +} + +#[test] +fn array() { + let dimensions = [ + ArrayDimension { + len: 1, + lower_bound: 10, + }, + ArrayDimension { + len: 2, + lower_bound: 0, + }, + ]; + let values = [None, Some(&b"hello"[..])]; + + let mut buf = vec![]; + array_to_sql( + dimensions.iter().cloned(), + 10, + values.iter().cloned(), + |v, buf| match v { + Some(v) => { + buf.extend_from_slice(v); + Ok(IsNull::No) + } + None => Ok(IsNull::Yes), + }, + &mut buf, + ) + .unwrap(); + + let array = array_from_sql(&buf).unwrap(); + assert_eq!(array.has_nulls(), true); + assert_eq!(array.element_type(), 10); + assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); + 
assert_eq!(array.values().collect::>().unwrap(), values); +} + +#[test] +fn non_null_array() { + let dimensions = [ + ArrayDimension { + len: 1, + lower_bound: 10, + }, + ArrayDimension { + len: 2, + lower_bound: 0, + }, + ]; + let values = [Some(&b"hola"[..]), Some(&b"hello"[..])]; + + let mut buf = vec![]; + array_to_sql( + dimensions.iter().cloned(), + 10, + values.iter().cloned(), + |v, buf| match v { + Some(v) => { + buf.extend_from_slice(v); + Ok(IsNull::No) + } + None => Ok(IsNull::Yes), + }, + &mut buf, + ) + .unwrap(); + + let array = array_from_sql(&buf).unwrap(); + assert_eq!(array.has_nulls(), false); + assert_eq!(array.element_type(), 10); + assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); + assert_eq!(array.values().collect::>().unwrap(), values); +} From 956ba12b5497f79ef61d594ec23c1816a1a119b1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 3 Apr 2019 21:30:22 -0700 Subject: [PATCH 167/819] Conversions from INET to IpAddr We ignore the netmask when deserializing and use /32 or /128 when serializing. Closes #430 --- postgres-protocol/src/types/mod.rs | 88 ++++++++++++++++++++++++++ tokio-postgres/src/types/mod.rs | 27 ++++++++ tokio-postgres/tests/test/types/mod.rs | 31 +++++++++ 3 files changed, 146 insertions(+) diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 46000c407..0a93692ff 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -3,6 +3,8 @@ use byteorder::{BigEndian, ByteOrder, ReadBytesExt, WriteBytesExt}; use fallible_iterator::FallibleIterator; use std::boxed::Box as StdBox; use std::error::Error; +use std::io::Read; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str; use crate::{write_nullable, FromUsize, IsNull, Oid}; @@ -16,6 +18,9 @@ const RANGE_UPPER_INCLUSIVE: u8 = 0b0000_0100; const RANGE_LOWER_INCLUSIVE: u8 = 0b0000_0010; const RANGE_EMPTY: u8 = 0b0000_0001; +const PGSQL_AF_INET: u8 = 2; +const PGSQL_AF_INET6: u8 = 3; + /// Serializes a `BOOL` value. #[inline] pub fn bool_to_sql(v: bool, buf: &mut Vec) { @@ -956,3 +961,86 @@ impl<'a> FallibleIterator for PathPoints<'a> { (len, Some(len)) } } + +/// Serializes a Postgres inet. +#[inline] +pub fn inet_to_sql(addr: IpAddr, netmask: u8, buf: &mut Vec) { + let family = match addr { + IpAddr::V4(_) => PGSQL_AF_INET, + IpAddr::V6(_) => PGSQL_AF_INET6, + }; + buf.push(family); + buf.push(netmask); + buf.push(0); // is_cidr + match addr { + IpAddr::V4(addr) => { + buf.push(4); + buf.extend_from_slice(&addr.octets()); + } + IpAddr::V6(addr) => { + buf.push(16); + buf.extend_from_slice(&addr.octets()); + } + } +} + +/// Deserializes a Postgres inet. 
+#[inline] +pub fn inet_from_sql(mut buf: &[u8]) -> Result> { + let family = buf.read_u8()?; + let netmask = buf.read_u8()?; + buf.read_u8()?; // is_cidr + let len = buf.read_u8()?; + + let addr = match family { + PGSQL_AF_INET => { + if netmask > 32 { + return Err("invalid IPv4 netmask".into()); + } + if len != 4 { + return Err("invalid IPv4 address length".into()); + } + let mut addr = [0; 4]; + buf.read_exact(&mut addr)?; + IpAddr::V4(Ipv4Addr::from(addr)) + } + PGSQL_AF_INET6 => { + if netmask > 128 { + return Err("invalid IPv6 netmask".into()); + } + if len != 16 { + return Err("invalid IPv6 address length".into()); + } + let mut addr = [0; 16]; + buf.read_exact(&mut addr)?; + IpAddr::V6(Ipv6Addr::from(addr)) + } + _ => return Err("invalid IP family".into()), + }; + + if !buf.is_empty() { + return Err("invalid buffer size".into()); + } + + Ok(Inet { addr, netmask }) +} + +/// A Postgres network address. +pub struct Inet { + addr: IpAddr, + netmask: u8, +} + +impl Inet { + /// Returns the IP address. + #[inline] + pub fn addr(&self) -> IpAddr { + self.addr + } + + /// Returns the netmask. + #[inline] + pub fn netmask(&self) -> u8 { + self.netmask + } +} diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 145a2530b..42599171e 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -8,6 +8,7 @@ use std::collections::HashMap; use std::error::Error; use std::fmt; use std::hash::BuildHasher; +use std::net::IpAddr; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -248,6 +249,7 @@ impl WrongType { /// | `&[u8]`/`Vec` | BYTEA | /// | `HashMap>` | HSTORE | /// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | +/// | `IpAddr` | INET | /// /// In addition, some implementations are provided for types in third party /// crates. These are disabled by default; to opt into one of these @@ -469,6 +471,15 @@ impl<'a> FromSql<'a> for SystemTime { accepts!(TIMESTAMP, TIMESTAMPTZ); } +impl<'a> FromSql<'a> for IpAddr { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(inet.addr()) + } + + accepts!(INET); +} + /// An enum representing the nullability of a Postgres value. pub enum IsNull { /// The value is NULL. @@ -498,6 +509,7 @@ pub enum IsNull { /// | `&[u8]`/Vec` | BYTEA | /// | `HashMap>` | HSTORE | /// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | +/// | `IpAddr` | INET | /// /// In addition, some implementations are provided for types in third party /// crates. 
These are disabled by default; to opt into one of these @@ -771,6 +783,21 @@ impl ToSql for SystemTime { to_sql_checked!(); } +impl ToSql for IpAddr { + fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + let netmask = match self { + IpAddr::V4(_) => 32, + IpAddr::V6(_) => 128, + }; + types::inet_to_sql(*self, netmask, w); + Ok(IsNull::No) + } + + accepts!(INET); + + to_sql_checked!(); +} + fn downcast(len: usize) -> Result> { if len > i32::max_value() as usize { Err("value too large to transmit".into()) diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index bf9870043..18858d568 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -4,6 +4,7 @@ use std::error::Error; use std::f32; use std::f64; use std::fmt; +use std::net::IpAddr; use std::result; use std::time::{Duration, UNIX_EPOCH}; use tokio::runtime::current_thread::Runtime; @@ -624,3 +625,33 @@ fn system_time() { ], ); } + +#[test] +fn inet() { + test_type( + "INET", + &[ + (Some("127.0.0.1".parse::().unwrap()), "'127.0.0.1'"), + ( + Some("127.0.0.1".parse::().unwrap()), + "'127.0.0.1/32'", + ), + ( + Some( + "2001:4f8:3:ba:2e0:81ff:fe22:d1f1" + .parse::() + .unwrap(), + ), + "'2001:4f8:3:ba:2e0:81ff:fe22:d1f1'", + ), + ( + Some( + "2001:4f8:3:ba:2e0:81ff:fe22:d1f1" + .parse::() + .unwrap(), + ), + "'2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128'", + ), + ], + ); +} From 0c8ecc024056a226a4496f8e4e4a24b343f1b894 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 6 Apr 2019 20:59:01 -0700 Subject: [PATCH 168/819] Release postgres v0.16.0-rc.1 --- postgres/CHANGELOG.md | 98 +++++++++++++ postgres/Cargo.toml | 15 +- postgres/LICENSE-APACHE | 201 +++++++++++++++++++++++++++ postgres/LICENSE-MIT | 22 +++ postgres/src/lib.rs | 1 + tokio-postgres-native-tls/Cargo.toml | 2 +- tokio-postgres-openssl/Cargo.toml | 2 +- tokio-postgres/CHANGELOG.md | 4 +- 8 files changed, 339 insertions(+), 6 deletions(-) create mode 100644 postgres/CHANGELOG.md create mode 100644 postgres/LICENSE-APACHE create mode 100644 postgres/LICENSE-MIT diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md new file mode 100644 index 000000000..1bfc907dc --- /dev/null +++ b/postgres/CHANGELOG.md @@ -0,0 +1,98 @@ +# Change Log + +## [Unreleased] + +## [v0.16.0-rc.1] - 2019-04-06 + +### Changed + +* `Connection` has been renamed to `Client`. +* The `Client` type is now a thin wrapper around the tokio-postgres nonblocking client. By default, this is handled + transparently by spawning connections onto an internal tokio `Runtime`, but this can also be controlled explicitly. +* The `ConnectParams` type and `IntoConnectParams` trait have been replaced by a builder-style `Config` type. + + Before: + ```rust + let params = ConnectParams::builder() + .user("postgres", None) + .build(Host::Tcp("localhost".to_string())) + .build(); + let conn = Connection::connect(params, &TlsMode::None)?; + ``` + After: + ```rust + let client = Client::configure() + .user("postgres") + .host("localhost") + .connect(NoTls)?; + ``` +* The TLS connection mode (e.g. `prefer`) is now part of the connection configuration instead of being passed in + separately. 
+ + Before: + ```rust + let conn = Connection::connect("postgres://postgres@localhost", &TlsMode::Prefer(connector))?; + ``` + After: + ```rust + let client = Client::connect("postgres://postgres@localhost?sslmode=prefer", connector)?; + ``` +* `Client` and `Transaction` methods take `&mut self` rather than `&self`, and correct use of the active transaction is + verified at compile time rather than runtime. +* `Row` no longer borrows any data. +* `Statement` is now a "token" which is passed into methods on `Client` and `Transaction` and does not borrow the + client: + + Before: + ```rust + let statement = conn.prepare("SELECT * FROM foo WHERE bar = $1")?; + let rows = statement.query(&[&1i32])?; + ``` + After: + ```rust + let statement = client.prepare("SELECT * FROM foo WHERE bar = $1")?; + let rows = client.query(&statement, &[1i32])?; + ``` +* `Statement::lazy_query` has been replaced with `Transaction::bind`, which returns a `Portal` type that can be used + with `Transaction::query_portal`. +* `Statement::copy_in` and `Statement::copy_out` have been moved to `Client` and `Transaction`. +* `Client::copy_out` and `Transaction::copy_out` now return a `Read`er rather than consuming in a `Write`r. +* `Connection::batch_execute` and `Transaction::batch_execute` have been replaced with `Client::simple_query` and + `Transaction::simple_query`. +* The Cargo features enabling `ToSql` and `FromSql` implementations for external crates are now versioned. For example, + `with-uuid` is now `with-uuid-0_7`. This enables us to add support for new major versions of the crates in parallel + without breaking backwards compatibility. + +### Added + +* Connection string configuration now more fully mirrors libpq's syntax, and supports both URL-style and key-value style + strings. +* `FromSql` implementations can now borrow from the data buffer. In particular, this means that you can deserialize + values as `&str`. The `FromSqlOwned` trait can be used as a bound to restrict code to deserializing owned values. +* Added support for channel binding with SCRAM authentication. +* Added multi-host support in connection configuration. +* Added support for simple query requests returning row data. +* Added variants of query methods which return fallible iterators of values and avoid fully buffering the response in + memory. + +### Removed + +* The `with-openssl` and `with-native-tls` Cargo features have been removed. Use the `tokio-postgres-openssl` and + `tokio-postgres-native-tls` crates instead. +* The `with-rustc_serialize` and `with-time` Cargo features have been removed. Use `serde` and `SystemTime` or `chrono` + instead. +* The `Transaction::set_commit` and `Transaction::set_rollback` methods have been removed. The only way to commit a + transaction is to explicitly consume it via `Transaction::commit`. +* The `Rows` type has been removed; methods now return `Vec` instead. +* `Connection::prepare_cache` has been removed, as `Statement` is now `'static` and can be more easily cached + externally. +* Some other slightly more obscure features have been removed in the initial release. If you depended on them, please + file an issue and we can find the right design to add them back! + +## Older + +Look at the [release tags] for information about older releases. 
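One change listed above that has no before/after snippet in this changelog is the replacement of `Statement::lazy_query` with `Transaction::bind` and `Transaction::query_portal`. A minimal sketch of the new flow, based on the method signatures added in the earlier patches (the table name and page size here are made up), looks roughly like this:

```rust
use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;

    // Portals only live inside a transaction.
    let mut transaction = client.transaction()?;
    let portal = transaction.bind("SELECT id FROM foo ORDER BY id", &[])?;

    loop {
        // Pull the next page of at most 100 rows; an empty page means we're done.
        let rows = transaction.query_portal(&portal, 100)?;
        if rows.is_empty() {
            break;
        }
        for row in rows {
            let id: i32 = row.get(0);
            println!("id: {}", id);
        }
    }

    transaction.commit()?;
    Ok(())
}
```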
+ +[Unreleased]: https://github.com/sfackler/rust-postgres/compare/postgres-v0.16.0-rc.1...master +[v0.16.0-rc.1]: https://github.com/sfackler/rust-postgres/compare/postgres-v0.15.2...postgres-v0.16.0-rc.1 +[release tags]: https://github.com/sfackler/rust-postgres/releases diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 211ae7218..dc9292072 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,12 +1,23 @@ [package] name = "postgres" -version = "0.1.0" +version = "0.16.0-rc.1" authors = ["Steven Fackler "] edition = "2018" +license = "MIT/Apache-2.0" +description = "A native, synchronous PostgreSQL client" +repository = "https://github.com/sfackler/rust-postgres" +readme = "../README.md" +keywords = ["database", "postgres", "postgresql", "sql"] +categories = ["database"] + +[package.metadata.docs.rs] +all-features = true + +[badges] +circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] - runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] "with-bit-vec-0_5" = ["tokio-postgres/with-bit-vec-0_5"] diff --git a/postgres/LICENSE-APACHE b/postgres/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/postgres/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/postgres/LICENSE-MIT b/postgres/LICENSE-MIT new file mode 100644 index 000000000..71803aea1 --- /dev/null +++ b/postgres/LICENSE-MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Steven Fackler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index f643db6ed..c4305a2ce 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -52,6 +52,7 @@ //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the //! `tokio-postgres-openssl` and `tokio-postgres-native-tls` crates provide implementations backed by the `openssl` and //! `native-tls` crates, respectively. +#![doc(html_root_url = "https://docs.rs/postgres/0.16.0-rc.1")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] #[cfg(feature = "runtime")] diff --git a/tokio-postgres-native-tls/Cargo.toml b/tokio-postgres-native-tls/Cargo.toml index ec1da558a..ec2b91069 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/tokio-postgres-native-tls/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-f [dev-dependencies] tokio = "0.1.7" -postgres = { version = "0.1.0", path = "../postgres" } +postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/tokio-postgres-openssl/Cargo.toml b/tokio-postgres-openssl/Cargo.toml index 0ba6a8639..8e1ba0750 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/tokio-postgres-openssl/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-f [dev-dependencies] tokio = "0.1.7" -postgres = { version = "0.1", path = "../postgres" } +postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 22b5197be..6b833bccc 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -14,8 +14,6 @@ * The client API has been significantly overhauled. It now resembles `hyper`'s, with separate `Connection` and `Client` objects. See the crate-level documentation for more details. -* Connection string configuration now fully mirrors libpq's syntax, and supports both URL-style and key-value style - strings. * The TLS connection mode (e.g. `prefer`) is now part of the connection configuration rather than being passed in separately. * The Cargo features enabling `ToSql` and `FromSql` implementations for external crates are now versioned. For example, @@ -25,6 +23,8 @@ ### Added +* Connection string configuration now more fully mirrors libpq's syntax, and supports both URL-style and key-value style + strings. * `FromSql` implementations can now borrow from the data buffer. In particular, this means that you can deserialize values as `&str`. The `FromSqlOwned` trait can be used as a bound to restrict code to deserializing owned values. * Added support for channel binding with SCRAM authentication. 
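The changelog entry above about libpq-style connection string configuration is worth a concrete illustration. Both of the following are accepted by the blocking client introduced in these patches (a minimal sketch assuming a local server and a database named `mydb`):

```rust
use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    // Key-value style, as used in the examples throughout these patches.
    let _kv = Client::connect("host=localhost user=postgres dbname=mydb", NoTls)?;

    // Equivalent URL-style string.
    let _url = Client::connect("postgres://postgres@localhost/mydb", NoTls)?;

    Ok(())
}
```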
From eb18cbd4759965125dffbf84b14be16a2655de13 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 6 Apr 2019 21:26:02 -0700 Subject: [PATCH 169/819] Reexport Socket from postgres --- postgres/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index c4305a2ce..67304ba83 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -60,6 +60,8 @@ use lazy_static::lazy_static; #[cfg(feature = "runtime")] use tokio::runtime::{self, Runtime}; +#[cfg(feature = "runtime")] +pub use tokio_postgres::Socket; pub use tokio_postgres::{error, row, tls, types, Column, Portal, SimpleQueryMessage, Statement}; pub use crate::client::*; From 1270524df29f59277f8c20bca6ad2c196dc63ca4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 11 Apr 2019 19:51:42 -0700 Subject: [PATCH 170/819] Fix new clippy warnings --- tokio-postgres/src/config.rs | 2 +- tokio-postgres/src/error/mod.rs | 2 +- tokio-postgres/src/proto/client.rs | 2 +- tokio-postgres/src/types/mod.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 76899725f..96c6cfca3 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -489,7 +489,7 @@ impl<'a> Parser<'a> { } fn skip_ws(&mut self) { - self.take_while(|c| c.is_whitespace()); + self.take_while(char::is_whitespace); } fn take_while(&mut self, f: F) -> &'a str diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 3e9545434..7fadf625a 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -413,7 +413,7 @@ impl Error { pub fn code(&self) -> Option<&SqlState> { self.source() .and_then(|e| e.downcast_ref::()) - .map(|e| e.code()) + .map(DbError::code) } fn new(kind: Kind, cause: Option>) -> Error { diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index f1e46309f..051d5160f 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -154,7 +154,7 @@ impl Client { pub fn prepare(&self, name: String, query: &str, param_types: &[Type]) -> PrepareFuture { let pending = self.pending(|buf| { - frontend::parse(&name, query, param_types.iter().map(|t| t.oid()), buf) + frontend::parse(&name, query, param_types.iter().map(Type::oid), buf) .map_err(Error::parse)?; frontend::describe(b'S', &name, buf).map_err(Error::parse)?; frontend::sync(buf); diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 42599171e..a13e59bfa 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -386,7 +386,7 @@ impl<'a> FromSql<'a> for &'a [u8] { impl<'a> FromSql<'a> for String { fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { - types::text_from_sql(raw).map(|b| b.to_owned()) + types::text_from_sql(raw).map(ToString::to_string) } fn accepts(ty: &Type) -> bool { From d8842982b046775a78158ad46dbd6967c0f63140 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 20 May 2019 19:05:54 -0700 Subject: [PATCH 171/819] Include the parameter/column index in type conversion errors Closes #442 --- tokio-postgres/src/error/mod.rs | 45 +++++++++++++++--------------- tokio-postgres/src/proto/client.rs | 12 +++++--- tokio-postgres/src/row.rs | 6 ++-- 3 files changed, 33 insertions(+), 30 deletions(-) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 7fadf625a..199f1b678 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ 
-334,8 +334,8 @@ enum Kind { Io, UnexpectedMessage, Tls, - ToSql, - FromSql, + ToSql(usize), + FromSql(usize), Column, CopyInStream, Closed, @@ -368,25 +368,24 @@ impl fmt::Debug for Error { impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let s = match self.0.kind { - Kind::Io => "error communicating with the server", - Kind::UnexpectedMessage => "unexpected message from server", - Kind::Tls => "error performing TLS handshake", - Kind::ToSql => "error serializing a value", - Kind::FromSql => "error deserializing a value", - Kind::Column => "invalid column", - Kind::CopyInStream => "error from a copy_in stream", - Kind::Closed => "connection closed", - Kind::Db => "db error", - Kind::Parse => "error parsing response from server", - Kind::Encode => "error encoding message to server", - Kind::Authentication => "authentication error", - Kind::ConfigParse => "invalid connection string", - Kind::Config => "invalid configuration", + match self.0.kind { + Kind::Io => fmt.write_str("error communicating with the server")?, + Kind::UnexpectedMessage => fmt.write_str("unexpected message from server")?, + Kind::Tls => fmt.write_str("error performing TLS handshake")?, + Kind::ToSql(idx) => write!(fmt, "error serializing parameter {}", idx)?, + Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, + Kind::Column => fmt.write_str("invalid column")?, + Kind::CopyInStream => fmt.write_str("error from a copy_in stream")?, + Kind::Closed => fmt.write_str("connection closed")?, + Kind::Db => fmt.write_str("db error")?, + Kind::Parse => fmt.write_str("error parsing response from server")?, + Kind::Encode => fmt.write_str("error encoding message to server")?, + Kind::Authentication => fmt.write_str("authentication error")?, + Kind::ConfigParse => fmt.write_str("invalid connection string")?, + Kind::Config => fmt.write_str("invalid configuration")?, #[cfg(feature = "runtime")] - Kind::Connect => "error connecting to server", + Kind::Connect => fmt.write_str("error connecting to server")?, }; - fmt.write_str(s)?; if let Some(ref cause) = self.0.cause { write!(fmt, ": {}", cause)?; } @@ -445,12 +444,12 @@ impl Error { } #[allow(clippy::wrong_self_convention)] - pub(crate) fn to_sql(e: Box) -> Error { - Error::new(Kind::ToSql, Some(e)) + pub(crate) fn to_sql(e: Box, idx: usize) -> Error { + Error::new(Kind::ToSql(idx), Some(e)) } - pub(crate) fn from_sql(e: Box) -> Error { - Error::new(Kind::FromSql, Some(e)) + pub(crate) fn from_sql(e: Box, idx: usize) -> Error { + Error::new(Kind::FromSql(idx), Some(e)) } pub(crate) fn column() -> Error { diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index 051d5160f..c3c011e86 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -298,22 +298,26 @@ impl Client { ); let mut buf = vec![]; + let mut error_idx = 0; let r = frontend::bind( name, statement.name(), Some(1), - params.iter().zip(statement.params()), - |(param, ty), buf| match param.to_sql_checked(ty, buf) { + params.iter().zip(statement.params()).enumerate(), + |(idx, (param, ty)), buf| match param.to_sql_checked(ty, buf) { Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), - Err(e) => Err(e), + Err(e) => { + error_idx = idx; + Err(e) + } }, Some(1), &mut buf, ); match r { Ok(()) => Ok(buf), - Err(frontend::BindError::Conversion(e)) => Err(Error::to_sql(e)), + Err(frontend::BindError::Conversion(e)) => Err(Error::to_sql(e, 
error_idx)), Err(frontend::BindError::Serialization(e)) => Err(Error::encode(e)), } } diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 0dd4c0520..aeb9c2849 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -167,11 +167,11 @@ impl Row { let ty = self.columns()[idx].type_(); if !T::accepts(ty) { - return Err(Error::from_sql(Box::new(WrongType::new(ty.clone())))); + return Err(Error::from_sql(Box::new(WrongType::new(ty.clone())), idx)); } let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); - FromSql::from_sql_nullable(ty, buf).map_err(Error::from_sql) + FromSql::from_sql_nullable(ty, buf).map_err(|e| Error::from_sql(e, idx)) } } @@ -238,6 +238,6 @@ impl SimpleQueryRow { }; let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); - FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(Error::from_sql) + FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(|e| Error::from_sql(e, idx)) } } From 4a5d30b4c6b94b81a3979a8be65cdef258480778 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 22 Jun 2019 21:22:03 -0700 Subject: [PATCH 172/819] Buffer copy_in messages Otherwise there's a ton of overhead passing tons of tiny messages over channels. --- .circleci/config.yml | 2 +- codegen/src/main.rs | 1 + tokio-postgres/src/proto/client.rs | 4 +- tokio-postgres/src/proto/copy_in.rs | 84 +++++++++++++++++++---------- 4 files changed, 60 insertions(+), 31 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b2d7fb732..3acad7821 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ version: 2 jobs: build: docker: - - image: rust:1.31.0 + - image: rust:1.35.0 environment: RUSTFLAGS: -D warnings - image: sfackler/rust-postgres-test:5 diff --git a/codegen/src/main.rs b/codegen/src/main.rs index e6559aca1..6d99ded60 100644 --- a/codegen/src/main.rs +++ b/codegen/src/main.rs @@ -1,4 +1,5 @@ #![warn(clippy::all)] +#![allow(clippy::write_with_newline)] extern crate linked_hash_map; extern crate marksman_escape; diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index c3c011e86..a328e90d1 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -212,8 +212,8 @@ impl Client { S::Error: Into>, { let (mut sender, receiver) = mpsc::channel(1); - let pending = PendingRequest(self.excecute_message(statement, params).map(|buf| { - match sender.start_send(CopyMessage::Data(buf)) { + let pending = PendingRequest(self.excecute_message(statement, params).map(|data| { + match sender.start_send(CopyMessage { data, done: false }) { Ok(AsyncSink::Ready) => {} _ => unreachable!("channel should have capacity"), } diff --git a/tokio-postgres/src/proto/copy_in.rs b/tokio-postgres/src/proto/copy_in.rs index 2f75596a4..33018aa15 100644 --- a/tokio-postgres/src/proto/copy_in.rs +++ b/tokio-postgres/src/proto/copy_in.rs @@ -6,14 +6,15 @@ use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::error::Error as StdError; +use std::mem; use crate::proto::client::{Client, PendingRequest}; use crate::proto::statement::Statement; use crate::Error; -pub enum CopyMessage { - Data(Vec), - Done, +pub struct CopyMessage { + pub data: Vec, + pub done: bool, } pub struct CopyInReceiver { @@ -40,13 +41,14 @@ impl Stream for CopyInReceiver { } match self.receiver.poll()? 
{ - Async::Ready(Some(CopyMessage::Data(buf))) => Ok(Async::Ready(Some(buf))), - Async::Ready(Some(CopyMessage::Done)) => { - self.done = true; - let mut buf = vec![]; - frontend::copy_done(&mut buf); - frontend::sync(&mut buf); - Ok(Async::Ready(Some(buf))) + Async::Ready(Some(mut data)) => { + if data.done { + self.done = true; + frontend::copy_done(&mut data.data); + frontend::sync(&mut data.data); + } + + Ok(Async::Ready(Some(data.data))) } Async::Ready(None) => { self.done = true; @@ -85,6 +87,7 @@ where #[state_machine_future(transitions(WriteCopyDone))] WriteCopyData { stream: S, + buf: Vec, pending_message: Option, sender: mpsc::Sender, receiver: mpsc::Receiver, @@ -133,6 +136,7 @@ where let state = state.take(); transition!(WriteCopyData { stream: state.stream, + buf: vec![], pending_message: None, sender: state.sender, receiver: state.receiver @@ -148,34 +152,58 @@ where fn poll_write_copy_data<'a>( state: &'a mut RentToOwn<'a, WriteCopyData>, ) -> Poll { + if let Some(message) = state.pending_message.take() { + match state + .sender + .start_send(message) + .map_err(|_| Error::closed())? + { + AsyncSink::Ready => {} + AsyncSink::NotReady(message) => { + state.pending_message = Some(message); + return Ok(Async::NotReady); + } + } + } + loop { - let message = match state.pending_message.take() { - Some(message) => message, - None => match try_ready!(state.stream.poll().map_err(Error::copy_in_stream)) { + let done = loop { + match try_ready!(state.stream.poll().map_err(Error::copy_in_stream)) { Some(data) => { - let mut buf = vec![]; // FIXME avoid collect - frontend::copy_data(&data.into_buf().collect::>(), &mut buf) + frontend::copy_data(&data.into_buf().collect::>(), &mut state.buf) .map_err(Error::encode)?; - CopyMessage::Data(buf) + if state.buf.len() > 4096 { + break false; + } } - None => { - let state = state.take(); - transition!(WriteCopyDone { - future: state.sender.send(CopyMessage::Done), - receiver: state.receiver - }) - } - }, + None => break true, + } }; - match state.sender.start_send(message) { - Ok(AsyncSink::Ready) => {} - Ok(AsyncSink::NotReady(message)) => { + let message = CopyMessage { + data: mem::replace(&mut state.buf, vec![]), + done, + }; + + if done { + let state = state.take(); + transition!(WriteCopyDone { + future: state.sender.send(message), + receiver: state.receiver, + }); + } + + match state + .sender + .start_send(message) + .map_err(|_| Error::closed())? + { + AsyncSink::Ready => {} + AsyncSink::NotReady(message) => { state.pending_message = Some(message); return Ok(Async::NotReady); } - Err(_) => return Err(Error::closed()), } } } From db462eb0180df8561a2a8e62c7d4e2d8b1efde28 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 25 Jun 2019 18:54:17 -0700 Subject: [PATCH 173/819] Avoid copies in copy_in copy_in data was previously copied ~3 times - once into the copy_in buffer, once more to frame it into a CopyData frame, and once to write that into the stream. Our Codec is now a bit more interesting. Rather than just writing out pre-encoded data, we can also send along unencoded CopyData so they can be framed directly into the stream output buffer. In the future we can extend this to e.g. avoid allocating for simple commands like Sync. This also allows us to directly pass large copy_in input directly through without rebuffering it. 
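For reference, the wire format being produced here is just the CopyData tag plus a big-endian length prefix followed by the payload. A minimal sketch of that framing (the helper name below is invented for illustration and is not the actual frontend::CopyData API):

    // Sketch only: frame a CopyData ('d') message straight into an output buffer,
    // so the payload is copied exactly once instead of being rebuffered.
    fn frame_copy_data(payload: &[u8], out: &mut Vec<u8>) {
        let len = payload.len() as u32 + 4;        // the length field counts itself
        out.push(b'd');                            // CopyData message tag
        out.extend_from_slice(&len.to_be_bytes()); // big-endian length prefix
        out.extend_from_slice(payload);            // payload appended directly
    }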
--- postgres-protocol/src/message/frontend.rs | 36 ++++++++ tokio-postgres/src/impls.rs | 2 +- tokio-postgres/src/lib.rs | 2 +- tokio-postgres/src/proto/client.rs | 28 +++--- tokio-postgres/src/proto/codec.rs | 18 +++- tokio-postgres/src/proto/connect_raw.rs | 12 +-- tokio-postgres/src/proto/connection.rs | 8 +- tokio-postgres/src/proto/copy_in.rs | 103 ++++++++++++---------- tokio-postgres/src/proto/mod.rs | 2 +- tokio-postgres/tests/test/main.rs | 44 +++++++++ 10 files changed, 180 insertions(+), 75 deletions(-) diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index 0ff0ddb65..6daed7d9d 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -2,6 +2,8 @@ #![allow(missing_docs)] use byteorder::{BigEndian, ByteOrder, WriteBytesExt}; +use bytes::{Buf, BufMut, BytesMut, IntoBuf}; +use std::convert::TryFrom; use std::error::Error; use std::io; use std::marker; @@ -263,6 +265,40 @@ pub fn copy_data(data: &[u8], buf: &mut Vec) -> io::Result<()> { }) } +pub struct CopyData { + buf: T, + len: i32, +} + +impl CopyData +where + T: Buf, +{ + pub fn new(buf: U) -> io::Result> + where + U: IntoBuf, + { + let buf = buf.into_buf(); + + let len = buf + .remaining() + .checked_add(4) + .and_then(|l| i32::try_from(l).ok()) + .ok_or_else(|| { + io::Error::new(io::ErrorKind::InvalidInput, "message length overflow") + })?; + + Ok(CopyData { buf, len }) + } + + pub fn write(self, out: &mut BytesMut) { + out.reserve(self.len as usize + 1); + out.put_u8(b'd'); + out.put_i32_be(self.len); + out.put(self.buf); + } +} + #[inline] pub fn copy_done(buf: &mut Vec) { buf.push(b'c'); diff --git a/tokio-postgres/src/impls.rs b/tokio-postgres/src/impls.rs index 546c3d7da..520a4aff8 100644 --- a/tokio-postgres/src/impls.rs +++ b/tokio-postgres/src/impls.rs @@ -170,7 +170,7 @@ pub struct CopyIn(pub(crate) proto::CopyInFuture) where S: Stream, S::Item: IntoBuf, - ::Buf: Send, + ::Buf: 'static + Send, S::Error: Into>; impl Future for CopyIn diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 060e0c5d9..78c653311 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -242,7 +242,7 @@ impl Client { where S: Stream, S::Item: IntoBuf, - ::Buf: Send, + ::Buf: 'static + Send, // FIXME error type? 
S::Error: Into>, { diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index a328e90d1..abb2f5341 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -11,6 +11,7 @@ use std::sync::{Arc, Weak}; use tokio_io::{AsyncRead, AsyncWrite}; use crate::proto::bind::BindFuture; +use crate::proto::codec::FrontendMessage; use crate::proto::connection::{Request, RequestMessages}; use crate::proto::copy_in::{CopyInFuture, CopyInReceiver, CopyMessage}; use crate::proto::copy_out::CopyOutStream; @@ -185,8 +186,12 @@ impl Client { if let Ok(ref mut buf) = buf { frontend::sync(buf); } - let pending = - PendingRequest(buf.map(|m| (RequestMessages::Single(m), self.0.idle.guard()))); + let pending = PendingRequest(buf.map(|m| { + ( + RequestMessages::Single(FrontendMessage::Raw(m)), + self.0.idle.guard(), + ) + })); BindFuture::new(self.clone(), pending, name, statement.clone()) } @@ -208,12 +213,12 @@ impl Client { where S: Stream, S::Item: IntoBuf, - ::Buf: Send, + ::Buf: 'static + Send, S::Error: Into>, { let (mut sender, receiver) = mpsc::channel(1); let pending = PendingRequest(self.excecute_message(statement, params).map(|data| { - match sender.start_send(CopyMessage { data, done: false }) { + match sender.start_send(CopyMessage::Message(data)) { Ok(AsyncSink::Ready) => {} _ => unreachable!("channel should have capacity"), } @@ -278,7 +283,7 @@ impl Client { frontend::sync(&mut buf); let (sender, _) = mpsc::channel(0); let _ = self.0.sender.unbounded_send(Request { - messages: RequestMessages::Single(buf), + messages: RequestMessages::Single(FrontendMessage::Raw(buf)), sender, idle: None, }); @@ -326,11 +331,11 @@ impl Client { &self, statement: &Statement, params: &[&dyn ToSql], - ) -> Result, Error> { + ) -> Result { let mut buf = self.bind_message(statement, "", params)?; frontend::execute("", 0, &mut buf).map_err(Error::parse)?; frontend::sync(&mut buf); - Ok(buf) + Ok(FrontendMessage::Raw(buf)) } fn pending(&self, messages: F) -> PendingRequest @@ -338,8 +343,11 @@ impl Client { F: FnOnce(&mut Vec) -> Result<(), Error>, { let mut buf = vec![]; - PendingRequest( - messages(&mut buf).map(|()| (RequestMessages::Single(buf), self.0.idle.guard())), - ) + PendingRequest(messages(&mut buf).map(|()| { + ( + RequestMessages::Single(FrontendMessage::Raw(buf)), + self.0.idle.guard(), + ) + })) } } diff --git a/tokio-postgres/src/proto/codec.rs b/tokio-postgres/src/proto/codec.rs index 4e37ab603..c7c6d9045 100644 --- a/tokio-postgres/src/proto/codec.rs +++ b/tokio-postgres/src/proto/codec.rs @@ -1,16 +1,26 @@ -use bytes::BytesMut; +use bytes::{Buf, BytesMut}; use postgres_protocol::message::backend; +use postgres_protocol::message::frontend::CopyData; use std::io; use tokio_codec::{Decoder, Encoder}; +pub enum FrontendMessage { + Raw(Vec), + CopyData(CopyData>), +} + pub struct PostgresCodec; impl Encoder for PostgresCodec { - type Item = Vec; + type Item = FrontendMessage; type Error = io::Error; - fn encode(&mut self, item: Vec, dst: &mut BytesMut) -> Result<(), io::Error> { - dst.extend_from_slice(&item); + fn encode(&mut self, item: FrontendMessage, dst: &mut BytesMut) -> Result<(), io::Error> { + match item { + FrontendMessage::Raw(buf) => dst.extend_from_slice(&buf), + FrontendMessage::CopyData(data) => data.write(dst), + } + Ok(()) } } diff --git a/tokio-postgres/src/proto/connect_raw.rs b/tokio-postgres/src/proto/connect_raw.rs index 45b2c3e58..85368cd9f 100644 --- a/tokio-postgres/src/proto/connect_raw.rs +++ 
b/tokio-postgres/src/proto/connect_raw.rs @@ -11,7 +11,7 @@ use std::collections::HashMap; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; -use crate::proto::{Client, Connection, MaybeTlsStream, PostgresCodec, TlsFuture}; +use crate::proto::{Client, Connection, MaybeTlsStream, PostgresCodec, TlsFuture, FrontendMessage}; use crate::tls::ChannelBinding; use crate::{Config, Error, TlsConnect}; @@ -111,7 +111,7 @@ where let stream = Framed::new(stream, PostgresCodec); transition!(SendingStartup { - future: stream.send(buf), + future: stream.send(FrontendMessage::Raw(buf)), config: state.config, idx: state.idx, channel_binding, @@ -156,7 +156,7 @@ where let mut buf = vec![]; frontend::password_message(pass, &mut buf).map_err(Error::encode)?; transition!(SendingPassword { - future: state.stream.send(buf), + future: state.stream.send(FrontendMessage::Raw(buf)), config: state.config, idx: state.idx, }) @@ -178,7 +178,7 @@ where let mut buf = vec![]; frontend::password_message(output.as_bytes(), &mut buf).map_err(Error::encode)?; transition!(SendingPassword { - future: state.stream.send(buf), + future: state.stream.send(FrontendMessage::Raw(buf)), config: state.config, idx: state.idx, }) @@ -235,7 +235,7 @@ where .map_err(Error::encode)?; transition!(SendingSasl { - future: state.stream.send(buf), + future: state.stream.send(FrontendMessage::Raw(buf)), scram, config: state.config, idx: state.idx, @@ -293,7 +293,7 @@ where let mut buf = vec![]; frontend::sasl_response(state.scram.message(), &mut buf).map_err(Error::encode)?; transition!(SendingSasl { - future: state.stream.send(buf), + future: state.stream.send(FrontendMessage::Raw(buf)), scram: state.scram, config: state.config, idx: state.idx, diff --git a/tokio-postgres/src/proto/connection.rs b/tokio-postgres/src/proto/connection.rs index dd9f30fe4..14559fd0a 100644 --- a/tokio-postgres/src/proto/connection.rs +++ b/tokio-postgres/src/proto/connection.rs @@ -8,17 +8,17 @@ use std::io; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; -use crate::proto::codec::PostgresCodec; +use crate::proto::codec::{FrontendMessage, PostgresCodec}; use crate::proto::copy_in::CopyInReceiver; use crate::proto::idle::IdleGuard; use crate::{AsyncMessage, Notification}; use crate::{DbError, Error}; pub enum RequestMessages { - Single(Vec), + Single(FrontendMessage), CopyIn { receiver: CopyInReceiver, - pending_message: Option>, + pending_message: Option, }, } @@ -188,7 +188,7 @@ where self.state = State::Terminating; let mut request = vec![]; frontend::terminate(&mut request); - RequestMessages::Single(request) + RequestMessages::Single(FrontendMessage::Raw(request)) } Async::Ready(None) => { trace!( diff --git a/tokio-postgres/src/proto/copy_in.rs b/tokio-postgres/src/proto/copy_in.rs index 33018aa15..80f09c520 100644 --- a/tokio-postgres/src/proto/copy_in.rs +++ b/tokio-postgres/src/proto/copy_in.rs @@ -1,20 +1,21 @@ -use bytes::{Buf, IntoBuf}; +use bytes::{Buf, BufMut, BytesMut, IntoBuf}; use futures::sink; +use futures::stream; use futures::sync::mpsc; use futures::{try_ready, Async, AsyncSink, Future, Poll, Sink, Stream}; use postgres_protocol::message::backend::Message; -use postgres_protocol::message::frontend; +use postgres_protocol::message::frontend::{self, CopyData}; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::error::Error as StdError; -use std::mem; use crate::proto::client::{Client, PendingRequest}; +use crate::proto::codec::FrontendMessage; use crate::proto::statement::Statement; use 
crate::Error; -pub struct CopyMessage { - pub data: Vec, - pub done: bool, +pub enum CopyMessage { + Message(FrontendMessage), + Done, } pub struct CopyInReceiver { @@ -32,30 +33,29 @@ impl CopyInReceiver { } impl Stream for CopyInReceiver { - type Item = Vec; + type Item = FrontendMessage; type Error = (); - fn poll(&mut self) -> Poll>, ()> { + fn poll(&mut self) -> Poll, ()> { if self.done { return Ok(Async::Ready(None)); } match self.receiver.poll()? { - Async::Ready(Some(mut data)) => { - if data.done { - self.done = true; - frontend::copy_done(&mut data.data); - frontend::sync(&mut data.data); - } - - Ok(Async::Ready(Some(data.data))) + Async::Ready(Some(CopyMessage::Message(message))) => Ok(Async::Ready(Some(message))), + Async::Ready(Some(CopyMessage::Done)) => { + self.done = true; + let mut buf = vec![]; + frontend::copy_done(&mut buf); + frontend::sync(&mut buf); + Ok(Async::Ready(Some(FrontendMessage::Raw(buf)))) } Async::Ready(None) => { self.done = true; let mut buf = vec![]; frontend::copy_fail("", &mut buf).unwrap(); frontend::sync(&mut buf); - Ok(Async::Ready(Some(buf))) + Ok(Async::Ready(Some(FrontendMessage::Raw(buf)))) } Async::NotReady => Ok(Async::NotReady), } @@ -67,7 +67,7 @@ pub enum CopyIn where S: Stream, S::Item: IntoBuf, - ::Buf: Send, + ::Buf: 'static + Send, S::Error: Into>, { #[state_machine_future(start, transitions(ReadCopyInResponse))] @@ -86,8 +86,8 @@ where }, #[state_machine_future(transitions(WriteCopyDone))] WriteCopyData { - stream: S, - buf: Vec, + stream: stream::Fuse, + buf: BytesMut, pending_message: Option, sender: mpsc::Sender, receiver: mpsc::Receiver, @@ -109,7 +109,7 @@ impl PollCopyIn for CopyIn where S: Stream, S::Item: IntoBuf, - ::Buf: Send, + ::Buf: 'static + Send, S::Error: Into>, { fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { @@ -135,8 +135,8 @@ where Some(Message::CopyInResponse(_)) => { let state = state.take(); transition!(WriteCopyData { - stream: state.stream, - buf: vec![], + stream: state.stream.fuse(), + buf: BytesMut::new(), pending_message: None, sender: state.sender, receiver: state.receiver @@ -167,44 +167,51 @@ where } loop { - let done = loop { + let buf: Box = loop { match try_ready!(state.stream.poll().map_err(Error::copy_in_stream)) { - Some(data) => { - // FIXME avoid collect - frontend::copy_data(&data.into_buf().collect::>(), &mut state.buf) - .map_err(Error::encode)?; + Some(buf) => { + let buf = buf.into_buf(); + if buf.remaining() > 4096 { + if state.buf.is_empty() { + break Box::new(buf); + } else { + let cur_buf = state.buf.take().freeze().into_buf(); + break Box::new(cur_buf.chain(buf)); + } + } + + state.buf.reserve(buf.remaining()); + state.buf.put(buf); if state.buf.len() > 4096 { - break false; + break Box::new(state.buf.take().freeze().into_buf()); } } - None => break true, + None => break Box::new(state.buf.take().freeze().into_buf()), } }; - let message = CopyMessage { - data: mem::replace(&mut state.buf, vec![]), - done, - }; + if buf.has_remaining() { + let data = CopyData::new(buf).map_err(Error::encode)?; + let message = CopyMessage::Message(FrontendMessage::CopyData(data)); - if done { + match state + .sender + .start_send(message) + .map_err(|_| Error::closed())? 
+ { + AsyncSink::Ready => {} + AsyncSink::NotReady(message) => { + state.pending_message = Some(message); + return Ok(Async::NotReady); + } + } + } else { let state = state.take(); transition!(WriteCopyDone { - future: state.sender.send(message), + future: state.sender.send(CopyMessage::Done), receiver: state.receiver, }); } - - match state - .sender - .start_send(message) - .map_err(|_| Error::closed())? - { - AsyncSink::Ready => {} - AsyncSink::NotReady(message) => { - state.pending_message = Some(message); - return Ok(Async::NotReady); - } - } } } diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 7667901c3..7c30cda85 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -53,7 +53,7 @@ pub use crate::proto::bind::BindFuture; pub use crate::proto::cancel_query::CancelQueryFuture; pub use crate::proto::cancel_query_raw::CancelQueryRawFuture; pub use crate::proto::client::Client; -pub use crate::proto::codec::PostgresCodec; +pub use crate::proto::codec::{FrontendMessage, PostgresCodec}; #[cfg(feature = "runtime")] pub use crate::proto::connect::ConnectFuture; #[cfg(feature = "runtime")] diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 7930b1b95..06a6d7382 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -4,6 +4,7 @@ use futures::sync::mpsc; use futures::{future, stream, try_ready}; use log::debug; use std::error::Error; +use std::fmt::Write; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Duration, Instant}; use tokio::net::TcpStream; @@ -616,6 +617,49 @@ fn copy_in() { assert_eq!(rows[1].get::<_, &str>(1), "joe"); } +#[test] +fn copy_in_large() { + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); + let connection = connection.map_err(|e| panic!("{}", e)); + runtime.handle().spawn(connection).unwrap(); + + runtime + .block_on( + client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id INTEGER, + name TEXT + )", + ) + .for_each(|_| Ok(())), + ) + .unwrap(); + + let a = "0\tname0\n".to_string(); + let mut b = String::new(); + for i in 1..5_000 { + writeln!(b, "{0}\tname{0}", i).unwrap(); + } + let mut c = String::new(); + for i in 5_000..10_000 { + writeln!(c, "{0}\tname{0}", i).unwrap(); + } + + let stream = stream::iter_ok::<_, String>(vec![a, b, c]); + let rows = runtime + .block_on( + client + .prepare("COPY foo FROM STDIN") + .and_then(|s| client.copy_in(&s, &[], stream)), + ) + .unwrap(); + assert_eq!(rows, 10_000); +} + #[test] fn copy_in_error() { let _ = env_logger::try_init(); From 9dbeb849f8a114dca7f2c31dec885a4f08e8f04a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 25 Jun 2019 19:20:59 -0700 Subject: [PATCH 174/819] rustfmt --- tokio-postgres/src/proto/connect_raw.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/proto/connect_raw.rs b/tokio-postgres/src/proto/connect_raw.rs index 85368cd9f..feeb4bfc4 100644 --- a/tokio-postgres/src/proto/connect_raw.rs +++ b/tokio-postgres/src/proto/connect_raw.rs @@ -11,7 +11,7 @@ use std::collections::HashMap; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; -use crate::proto::{Client, Connection, MaybeTlsStream, PostgresCodec, TlsFuture, FrontendMessage}; +use crate::proto::{Client, Connection, FrontendMessage, MaybeTlsStream, PostgresCodec, TlsFuture}; use crate::tls::ChannelBinding; use 
crate::{Config, Error, TlsConnect}; From 2d2a5dea81be7fdac54ea1ea64b7a407efb13c9b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 27 Jun 2019 21:37:22 -0700 Subject: [PATCH 175/819] Send response messages in blocks Our codec implementation originally just parsed single messages out of the stream buffer. However, if a query returns a bunch of rows, we're spending a ton of time shipping those individual messages from the connection back to the Query stream. Instead, collect blocks of unparsed messages that are as large as possible and send those back. This cuts the processing time of the following query in half, from ~10 seconds to ~5: `SELECT s.n, 'name' || s.n FROM generate_series(0, 9999999) AS s(n)` At this point, almost all of the remainder of the time is spent parsing the rows. cc #450 --- Cargo.toml | 3 + postgres-protocol/src/message/backend.rs | 104 ++++++++++++++++++----- tokio-postgres/src/proto/bind.rs | 8 +- tokio-postgres/src/proto/client.rs | 6 +- tokio-postgres/src/proto/codec.rs | 70 ++++++++++++++- tokio-postgres/src/proto/connect_raw.rs | 74 ++++++++++++++-- tokio-postgres/src/proto/connection.rs | 51 ++++++----- tokio-postgres/src/proto/copy_in.rs | 13 +-- tokio-postgres/src/proto/copy_out.rs | 10 +-- tokio-postgres/src/proto/execute.rs | 8 +- tokio-postgres/src/proto/mod.rs | 11 +-- tokio-postgres/src/proto/prepare.rs | 14 +-- tokio-postgres/src/proto/query.rs | 6 +- tokio-postgres/src/proto/responses.rs | 42 +++++++++ tokio-postgres/src/proto/simple_query.rs | 6 +- 15 files changed, 324 insertions(+), 102 deletions(-) create mode 100644 tokio-postgres/src/proto/responses.rs diff --git a/Cargo.toml b/Cargo.toml index 40e30b1e8..37421ba99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,3 +7,6 @@ members = [ "tokio-postgres-native-tls", "tokio-postgres-openssl", ] + +[profile.release] +debug = 2 diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index f9c37b590..909f8bfd3 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -1,6 +1,6 @@ #![allow(missing_docs)] -use byteorder::{BigEndian, ReadBytesExt}; +use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; use bytes::{Bytes, BytesMut}; use fallible_iterator::FallibleIterator; use memchr::memchr; @@ -11,6 +11,66 @@ use std::str; use crate::Oid; +pub const PARSE_COMPLETE_TAG: u8 = b'1'; +pub const BIND_COMPLETE_TAG: u8 = b'2'; +pub const CLOSE_COMPLETE_TAG: u8 = b'3'; +pub const NOTIFICATION_RESPONSE_TAG: u8 = b'A'; +pub const COPY_DONE_TAG: u8 = b'c'; +pub const COMMAND_COMPLETE_TAG: u8 = b'C'; +pub const COPY_DATA_TAG: u8 = b'd'; +pub const DATA_ROW_TAG: u8 = b'D'; +pub const ERROR_RESPONSE_TAG: u8 = b'E'; +pub const COPY_IN_RESPONSE_TAG: u8 = b'G'; +pub const COPY_OUT_RESPONSE_TAG: u8 = b'H'; +pub const EMPTY_QUERY_RESPONSE_TAG: u8 = b'I'; +pub const BACKEND_KEY_DATA_TAG: u8 = b'K'; +pub const NO_DATA_TAG: u8 = b'n'; +pub const NOTICE_RESPONSE_TAG: u8 = b'N'; +pub const AUTHENTICATION_TAG: u8 = b'R'; +pub const PORTAL_SUSPENDED_TAG: u8 = b's'; +pub const PARAMETER_STATUS_TAG: u8 = b'S'; +pub const PARAMETER_DESCRIPTION_TAG: u8 = b't'; +pub const ROW_DESCRIPTION_TAG: u8 = b'T'; +pub const READY_FOR_QUERY_TAG: u8 = b'Z'; + +#[derive(Debug, Copy, Clone)] +pub struct Header { + tag: u8, + len: i32, +} + +#[allow(clippy::len_without_is_empty)] +impl Header { + #[inline] + pub fn parse(buf: &[u8]) -> io::Result> { + if buf.len() < 5 { + return Ok(None); + } + + let tag = buf[0]; + let len = BigEndian::read_i32(&buf[1..]); + + 
if len < 4 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "invalid message length", + )); + } + + Ok(Some(Header { tag, len })) + } + + #[inline] + pub fn tag(self) -> u8 { + self.tag + } + + #[inline] + pub fn len(self) -> i32 { + self.len + } +} + /// An enum representing Postgres backend messages. pub enum Message { AuthenticationCleartextPassword, @@ -80,10 +140,10 @@ impl Message { }; let message = match tag { - b'1' => Message::ParseComplete, - b'2' => Message::BindComplete, - b'3' => Message::CloseComplete, - b'A' => { + PARSE_COMPLETE_TAG => Message::ParseComplete, + BIND_COMPLETE_TAG => Message::BindComplete, + CLOSE_COMPLETE_TAG => Message::CloseComplete, + NOTIFICATION_RESPONSE_TAG => { let process_id = buf.read_i32::()?; let channel = buf.read_cstr()?; let message = buf.read_cstr()?; @@ -93,25 +153,25 @@ impl Message { message, }) } - b'c' => Message::CopyDone, - b'C' => { + COPY_DONE_TAG => Message::CopyDone, + COMMAND_COMPLETE_TAG => { let tag = buf.read_cstr()?; Message::CommandComplete(CommandCompleteBody { tag }) } - b'd' => { + COPY_DATA_TAG => { let storage = buf.read_all(); Message::CopyData(CopyDataBody { storage }) } - b'D' => { + DATA_ROW_TAG => { let len = buf.read_u16::()?; let storage = buf.read_all(); Message::DataRow(DataRowBody { storage, len }) } - b'E' => { + ERROR_RESPONSE_TAG => { let storage = buf.read_all(); Message::ErrorResponse(ErrorResponseBody { storage }) } - b'G' => { + COPY_IN_RESPONSE_TAG => { let format = buf.read_u8()?; let len = buf.read_u16::()?; let storage = buf.read_all(); @@ -121,7 +181,7 @@ impl Message { storage, }) } - b'H' => { + COPY_OUT_RESPONSE_TAG => { let format = buf.read_u8()?; let len = buf.read_u16::()?; let storage = buf.read_all(); @@ -131,8 +191,8 @@ impl Message { storage, }) } - b'I' => Message::EmptyQueryResponse, - b'K' => { + EMPTY_QUERY_RESPONSE_TAG => Message::EmptyQueryResponse, + BACKEND_KEY_DATA_TAG => { let process_id = buf.read_i32::()?; let secret_key = buf.read_i32::()?; Message::BackendKeyData(BackendKeyDataBody { @@ -140,12 +200,12 @@ impl Message { secret_key, }) } - b'n' => Message::NoData, - b'N' => { + NO_DATA_TAG => Message::NoData, + NOTICE_RESPONSE_TAG => { let storage = buf.read_all(); Message::NoticeResponse(NoticeResponseBody { storage }) } - b'R' => match buf.read_i32::()? { + AUTHENTICATION_TAG => match buf.read_i32::()? 
{ 0 => Message::AuthenticationOk, 2 => Message::AuthenticationKerberosV5, 3 => Message::AuthenticationCleartextPassword, @@ -180,23 +240,23 @@ impl Message { )); } }, - b's' => Message::PortalSuspended, - b'S' => { + PORTAL_SUSPENDED_TAG => Message::PortalSuspended, + PARAMETER_STATUS_TAG => { let name = buf.read_cstr()?; let value = buf.read_cstr()?; Message::ParameterStatus(ParameterStatusBody { name, value }) } - b't' => { + PARAMETER_DESCRIPTION_TAG => { let len = buf.read_u16::()?; let storage = buf.read_all(); Message::ParameterDescription(ParameterDescriptionBody { storage, len }) } - b'T' => { + ROW_DESCRIPTION_TAG => { let len = buf.read_u16::()?; let storage = buf.read_all(); Message::RowDescription(RowDescriptionBody { storage, len }) } - b'Z' => { + READY_FOR_QUERY_TAG => { let status = buf.read_u8()?; Message::ReadyForQuery(ReadyForQueryBody { status }) } diff --git a/tokio-postgres/src/proto/bind.rs b/tokio-postgres/src/proto/bind.rs index 0a8a3b5f0..944afd6ea 100644 --- a/tokio-postgres/src/proto/bind.rs +++ b/tokio-postgres/src/proto/bind.rs @@ -1,10 +1,10 @@ -use futures::sync::mpsc; -use futures::{Poll, Stream}; +use futures::{try_ready, Poll, Stream}; use postgres_protocol::message::backend::Message; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use crate::proto::client::{Client, PendingRequest}; use crate::proto::portal::Portal; +use crate::proto::responses::Responses; use crate::proto::statement::Statement; use crate::Error; @@ -19,7 +19,7 @@ pub enum Bind { }, #[state_machine_future(transitions(Finished))] ReadBindComplete { - receiver: mpsc::Receiver, + receiver: Responses, client: Client, name: String, statement: Statement, @@ -46,7 +46,7 @@ impl PollBind for Bind { fn poll_read_bind_complete<'a>( state: &'a mut RentToOwn<'a, ReadBindComplete>, ) -> Poll { - let message = try_ready_receive!(state.receiver.poll()); + let message = try_ready!(state.receiver.poll()); let state = state.take(); match message { diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index abb2f5341..7fb070e90 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -3,7 +3,6 @@ use bytes::IntoBuf; use futures::sync::mpsc; use futures::{AsyncSink, Poll, Sink, Stream}; use postgres_protocol; -use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::collections::HashMap; use std::error::Error as StdError; @@ -20,6 +19,7 @@ use crate::proto::idle::{IdleGuard, IdleState}; use crate::proto::portal::Portal; use crate::proto::prepare::PrepareFuture; use crate::proto::query::QueryStream; +use crate::proto::responses::{self, Responses}; use crate::proto::simple_query::SimpleQueryStream; use crate::proto::statement::Statement; #[cfg(feature = "runtime")] @@ -130,9 +130,9 @@ impl Client { self.0.state.lock().typeinfo_composite_query = Some(statement.clone()); } - pub fn send(&self, request: PendingRequest) -> Result, Error> { + pub fn send(&self, request: PendingRequest) -> Result { let (messages, idle) = request.0?; - let (sender, receiver) = mpsc::channel(1); + let (sender, receiver) = responses::channel(); self.0 .sender .unbounded_send(Request { diff --git a/tokio-postgres/src/proto/codec.rs b/tokio-postgres/src/proto/codec.rs index c7c6d9045..4ebebd479 100644 --- a/tokio-postgres/src/proto/codec.rs +++ b/tokio-postgres/src/proto/codec.rs @@ -1,4 +1,5 @@ use bytes::{Buf, BytesMut}; +use fallible_iterator::FallibleIterator; use postgres_protocol::message::backend; 
use postgres_protocol::message::frontend::CopyData; use std::io; @@ -9,6 +10,31 @@ pub enum FrontendMessage { CopyData(CopyData>), } +pub enum BackendMessage { + Normal { + messages: BackendMessages, + request_complete: bool, + }, + Async(backend::Message), +} + +pub struct BackendMessages(BytesMut); + +impl BackendMessages { + pub fn empty() -> BackendMessages { + BackendMessages(BytesMut::new()) + } +} + +impl FallibleIterator for BackendMessages { + type Item = backend::Message; + type Error = io::Error; + + fn next(&mut self) -> io::Result> { + backend::Message::parse(&mut self.0) + } +} + pub struct PostgresCodec; impl Encoder for PostgresCodec { @@ -26,10 +52,48 @@ impl Encoder for PostgresCodec { } impl Decoder for PostgresCodec { - type Item = backend::Message; + type Item = BackendMessage; type Error = io::Error; - fn decode(&mut self, src: &mut BytesMut) -> Result, io::Error> { - backend::Message::parse(src) + fn decode(&mut self, src: &mut BytesMut) -> Result, io::Error> { + let mut idx = 0; + let mut request_complete = false; + + while let Some(header) = backend::Header::parse(&src[idx..])? { + let len = header.len() as usize + 1; + if src[idx..].len() < len { + break; + } + + match header.tag() { + backend::NOTICE_RESPONSE_TAG + | backend::NOTIFICATION_RESPONSE_TAG + | backend::PARAMETER_STATUS_TAG => { + if idx == 0 { + let message = backend::Message::parse(src)?.unwrap(); + return Ok(Some(BackendMessage::Async(message))); + } else { + break; + } + } + _ => {} + } + + idx += len; + + if header.tag() == backend::READY_FOR_QUERY_TAG { + request_complete = true; + break; + } + } + + if idx == 0 { + Ok(None) + } else { + Ok(Some(BackendMessage::Normal { + messages: BackendMessages(src.split_to(idx)), + request_complete, + })) + } } } diff --git a/tokio-postgres/src/proto/connect_raw.rs b/tokio-postgres/src/proto/connect_raw.rs index feeb4bfc4..0cb0ec033 100644 --- a/tokio-postgres/src/proto/connect_raw.rs +++ b/tokio-postgres/src/proto/connect_raw.rs @@ -1,6 +1,6 @@ use fallible_iterator::FallibleIterator; -use futures::sink; use futures::sync::mpsc; +use futures::{sink, Async, AsyncSink}; use futures::{try_ready, Future, Poll, Sink, Stream}; use postgres_protocol::authentication; use postgres_protocol::authentication::sasl::{self, ScramSha256}; @@ -8,13 +8,64 @@ use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use std::collections::HashMap; +use std::io; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; +use crate::proto::codec::{BackendMessage, BackendMessages}; use crate::proto::{Client, Connection, FrontendMessage, MaybeTlsStream, PostgresCodec, TlsFuture}; use crate::tls::ChannelBinding; use crate::{Config, Error, TlsConnect}; +pub struct StartupStream { + inner: Framed, PostgresCodec>, + buf: BackendMessages, +} + +impl Sink for StartupStream +where + S: AsyncRead + AsyncWrite, + T: AsyncRead + AsyncWrite, +{ + type SinkItem = FrontendMessage; + type SinkError = io::Error; + + fn start_send(&mut self, item: FrontendMessage) -> io::Result> { + self.inner.start_send(item) + } + + fn poll_complete(&mut self) -> Poll<(), io::Error> { + self.inner.poll_complete() + } + + fn close(&mut self) -> Poll<(), io::Error> { + self.inner.close() + } +} + +impl Stream for StartupStream +where + S: AsyncRead + AsyncWrite, + T: AsyncRead + AsyncWrite, +{ + type Item = Message; + type Error = io::Error; + + fn poll(&mut self) -> Poll, io::Error> { + loop { + if let 
Some(message) = self.buf.next()? { + return Ok(Async::Ready(Some(message))); + } + + match try_ready!(self.inner.poll()) { + Some(BackendMessage::Async(message)) => return Ok(Async::Ready(Some(message))), + Some(BackendMessage::Normal { messages, .. }) => self.buf = messages, + None => return Ok(Async::Ready(None)), + } + } + } +} + #[derive(StateMachineFuture)] pub enum ConnectRaw where @@ -29,47 +80,47 @@ where }, #[state_machine_future(transitions(ReadingAuth))] SendingStartup { - future: sink::Send, PostgresCodec>>, + future: sink::Send>, config: Config, idx: Option, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingInfo, SendingPassword, SendingSasl))] ReadingAuth { - stream: Framed, PostgresCodec>, + stream: StartupStream, config: Config, idx: Option, channel_binding: ChannelBinding, }, #[state_machine_future(transitions(ReadingAuthCompletion))] SendingPassword { - future: sink::Send, PostgresCodec>>, + future: sink::Send>, config: Config, idx: Option, }, #[state_machine_future(transitions(ReadingSasl))] SendingSasl { - future: sink::Send, PostgresCodec>>, + future: sink::Send>, scram: ScramSha256, config: Config, idx: Option, }, #[state_machine_future(transitions(SendingSasl, ReadingAuthCompletion))] ReadingSasl { - stream: Framed, PostgresCodec>, + stream: StartupStream, scram: ScramSha256, config: Config, idx: Option, }, #[state_machine_future(transitions(ReadingInfo))] ReadingAuthCompletion { - stream: Framed, PostgresCodec>, + stream: StartupStream, config: Config, idx: Option, }, #[state_machine_future(transitions(Finished))] ReadingInfo { - stream: Framed, PostgresCodec>, + stream: StartupStream, process_id: i32, secret_key: i32, parameters: HashMap, @@ -109,6 +160,10 @@ where frontend::startup_message(params, &mut buf).map_err(Error::encode)?; let stream = Framed::new(stream, PostgresCodec); + let stream = StartupStream { + inner: stream, + buf: BackendMessages::empty(), + }; transition!(SendingStartup { future: stream.send(FrontendMessage::Raw(buf)), @@ -363,7 +418,8 @@ where state.config, state.idx, ); - let connection = Connection::new(state.stream, state.parameters, receiver); + let connection = + Connection::new(state.stream.inner, state.parameters, receiver); transition!(Finished((client, connection))) } Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), diff --git a/tokio-postgres/src/proto/connection.rs b/tokio-postgres/src/proto/connection.rs index 14559fd0a..222fd16ea 100644 --- a/tokio-postgres/src/proto/connection.rs +++ b/tokio-postgres/src/proto/connection.rs @@ -1,3 +1,4 @@ +use fallible_iterator::FallibleIterator; use futures::sync::mpsc; use futures::{try_ready, Async, AsyncSink, Future, Poll, Sink, Stream}; use log::trace; @@ -8,7 +9,7 @@ use std::io; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; -use crate::proto::codec::{FrontendMessage, PostgresCodec}; +use crate::proto::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; use crate::proto::copy_in::CopyInReceiver; use crate::proto::idle::IdleGuard; use crate::{AsyncMessage, Notification}; @@ -24,12 +25,12 @@ pub enum RequestMessages { pub struct Request { pub messages: RequestMessages, - pub sender: mpsc::Sender, + pub sender: mpsc::Sender, pub idle: Option, } struct Response { - sender: mpsc::Sender, + sender: mpsc::Sender, _idle: Option, } @@ -45,7 +46,7 @@ pub struct Connection { parameters: HashMap, receiver: mpsc::UnboundedReceiver, pending_request: Option, - pending_response: Option, + pending_response: Option, 
responses: VecDeque, state: State, } @@ -74,7 +75,7 @@ where self.parameters.get(name).map(|s| &**s) } - fn poll_response(&mut self) -> Poll, io::Error> { + fn poll_response(&mut self) -> Poll, io::Error> { if let Some(message) = self.pending_response.take() { trace!("retrying pending response"); return Ok(Async::Ready(Some(message))); @@ -101,12 +102,12 @@ where } }; - let message = match message { - Message::NoticeResponse(body) => { + let (mut messages, request_complete) = match message { + BackendMessage::Async(Message::NoticeResponse(body)) => { let error = DbError::parse(&mut body.fields()).map_err(Error::parse)?; return Ok(Some(AsyncMessage::Notice(error))); } - Message::NotificationResponse(body) => { + BackendMessage::Async(Message::NotificationResponse(body)) => { let notification = Notification { process_id: body.process_id(), channel: body.channel().map_err(Error::parse)?.to_string(), @@ -114,30 +115,29 @@ where }; return Ok(Some(AsyncMessage::Notification(notification))); } - Message::ParameterStatus(body) => { + BackendMessage::Async(Message::ParameterStatus(body)) => { self.parameters.insert( body.name().map_err(Error::parse)?.to_string(), body.value().map_err(Error::parse)?.to_string(), ); continue; } - m => m, + BackendMessage::Async(_) => unreachable!(), + BackendMessage::Normal { + messages, + request_complete, + } => (messages, request_complete), }; let mut response = match self.responses.pop_front() { Some(response) => response, - None => match message { - Message::ErrorResponse(error) => return Err(Error::db(error)), + None => match messages.next().map_err(Error::parse)? { + Some(Message::ErrorResponse(error)) => return Err(Error::db(error)), _ => return Err(Error::unexpected_message()), }, }; - let request_complete = match message { - Message::ReadyForQuery(_) => true, - _ => false, - }; - - match response.sender.start_send(message) { + match response.sender.start_send(messages) { // if the receiver's hung up we still need to page through the rest of the messages // designated to it Ok(AsyncSink::Ready) | Err(_) => { @@ -145,9 +145,12 @@ where self.responses.push_front(response); } } - Ok(AsyncSink::NotReady(message)) => { + Ok(AsyncSink::NotReady(messages)) => { self.responses.push_front(response); - self.pending_response = Some(message); + self.pending_response = Some(BackendMessage::Normal { + messages, + request_complete, + }); trace!("poll_read: waiting on sender"); return Ok(None); } @@ -161,8 +164,8 @@ where return Ok(Async::Ready(Some(message))); } - match try_ready_receive!(self.receiver.poll()) { - Some(request) => { + match self.receiver.poll() { + Ok(Async::Ready(Some(request))) => { trace!("polled new request"); self.responses.push_back(Response { sender: request.sender, @@ -170,7 +173,9 @@ where }); Ok(Async::Ready(Some(request.messages))) } - None => Ok(Async::Ready(None)), + Ok(Async::Ready(None)) => Ok(Async::Ready(None)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(()) => unreachable!("mpsc::Receiver doesn't error"), } } diff --git a/tokio-postgres/src/proto/copy_in.rs b/tokio-postgres/src/proto/copy_in.rs index 80f09c520..762f1c462 100644 --- a/tokio-postgres/src/proto/copy_in.rs +++ b/tokio-postgres/src/proto/copy_in.rs @@ -10,6 +10,7 @@ use std::error::Error as StdError; use crate::proto::client::{Client, PendingRequest}; use crate::proto::codec::FrontendMessage; +use crate::proto::responses::Responses; use crate::proto::statement::Statement; use crate::Error; @@ -82,7 +83,7 @@ where ReadCopyInResponse { stream: S, sender: mpsc::Sender, - 
receiver: mpsc::Receiver, + receiver: Responses, }, #[state_machine_future(transitions(WriteCopyDone))] WriteCopyData { @@ -90,15 +91,15 @@ where buf: BytesMut, pending_message: Option, sender: mpsc::Sender, - receiver: mpsc::Receiver, + receiver: Responses, }, #[state_machine_future(transitions(ReadCommandComplete))] WriteCopyDone { future: sink::Send>, - receiver: mpsc::Receiver, + receiver: Responses, }, #[state_machine_future(transitions(Finished))] - ReadCommandComplete { receiver: mpsc::Receiver }, + ReadCommandComplete { receiver: Responses }, #[state_machine_future(ready)] Finished(u64), #[state_machine_future(error)] @@ -128,7 +129,7 @@ where state: &'a mut RentToOwn<'a, ReadCopyInResponse>, ) -> Poll, Error> { loop { - let message = try_ready_receive!(state.receiver.poll()); + let message = try_ready!(state.receiver.poll()); match message { Some(Message::BindComplete) => {} @@ -229,7 +230,7 @@ where fn poll_read_command_complete<'a>( state: &'a mut RentToOwn<'a, ReadCommandComplete>, ) -> Poll { - let message = try_ready_receive!(state.receiver.poll()); + let message = try_ready!(state.receiver.poll()); match message { Some(Message::CommandComplete(body)) => { diff --git a/tokio-postgres/src/proto/copy_out.rs b/tokio-postgres/src/proto/copy_out.rs index c0418222a..1ae714188 100644 --- a/tokio-postgres/src/proto/copy_out.rs +++ b/tokio-postgres/src/proto/copy_out.rs @@ -1,10 +1,10 @@ use bytes::Bytes; -use futures::sync::mpsc; use futures::{Async, Poll, Stream}; use postgres_protocol::message::backend::Message; use std::mem; use crate::proto::client::{Client, PendingRequest}; +use crate::proto::responses::Responses; use crate::proto::statement::Statement; use crate::Error; @@ -15,10 +15,10 @@ enum State { statement: Statement, }, ReadingCopyOutResponse { - receiver: mpsc::Receiver, + receiver: Responses, }, ReadingCopyData { - receiver: mpsc::Receiver, + receiver: Responses, }, Done, } @@ -49,7 +49,7 @@ impl Stream for CopyOutStream { self.0 = State::ReadingCopyOutResponse { receiver }; break Ok(Async::NotReady); } - Err(()) => unreachable!("mpsc::Receiver doesn't return errors"), + Err(e) => return Err(e), }; match message { @@ -71,7 +71,7 @@ impl Stream for CopyOutStream { self.0 = State::ReadingCopyData { receiver }; break Ok(Async::NotReady); } - Err(()) => unreachable!("mpsc::Reciever doesn't return errors"), + Err(e) => return Err(e), }; match message { diff --git a/tokio-postgres/src/proto/execute.rs b/tokio-postgres/src/proto/execute.rs index 25b1e90a0..0f8e021fe 100644 --- a/tokio-postgres/src/proto/execute.rs +++ b/tokio-postgres/src/proto/execute.rs @@ -1,9 +1,9 @@ -use futures::sync::mpsc; -use futures::{Poll, Stream}; +use futures::{try_ready, Poll, Stream}; use postgres_protocol::message::backend::Message; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; use crate::proto::client::{Client, PendingRequest}; +use crate::proto::responses::Responses; use crate::proto::statement::Statement; use crate::Error; @@ -16,7 +16,7 @@ pub enum Execute { statement: Statement, }, #[state_machine_future(transitions(Finished))] - ReadResponse { receiver: mpsc::Receiver }, + ReadResponse { receiver: Responses }, #[state_machine_future(ready)] Finished(u64), #[state_machine_future(error)] @@ -36,7 +36,7 @@ impl PollExecute for Execute { state: &'a mut RentToOwn<'a, ReadResponse>, ) -> Poll { loop { - let message = try_ready_receive!(state.receiver.poll()); + let message = try_ready!(state.receiver.poll()); match message { Some(Message::BindComplete) => {} diff --git 
a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs index 7c30cda85..2979d8a30 100644 --- a/tokio-postgres/src/proto/mod.rs +++ b/tokio-postgres/src/proto/mod.rs @@ -1,13 +1,3 @@ -macro_rules! try_ready_receive { - ($e:expr) => { - match $e { - Ok(::futures::Async::Ready(v)) => v, - Ok(::futures::Async::NotReady) => return Ok(::futures::Async::NotReady), - Err(()) => unreachable!("mpsc::Receiver doesn't return errors"), - } - }; -} - macro_rules! try_ready_closed { ($e:expr) => { match $e { @@ -40,6 +30,7 @@ mod maybe_tls_stream; mod portal; mod prepare; mod query; +mod responses; mod simple_query; mod statement; mod tls; diff --git a/tokio-postgres/src/proto/prepare.rs b/tokio-postgres/src/proto/prepare.rs index 029bbb8a5..a29aca11b 100644 --- a/tokio-postgres/src/proto/prepare.rs +++ b/tokio-postgres/src/proto/prepare.rs @@ -1,7 +1,6 @@ #![allow(clippy::large_enum_variant)] use fallible_iterator::FallibleIterator; -use futures::sync::mpsc; use futures::{try_ready, Future, Poll, Stream}; use postgres_protocol::message::backend::Message; use state_machine_future::{transition, RentToOwn, StateMachineFuture}; @@ -9,6 +8,7 @@ use std::mem; use std::vec; use crate::proto::client::{Client, PendingRequest}; +use crate::proto::responses::Responses; use crate::proto::statement::Statement; use crate::proto::typeinfo::TypeinfoFuture; use crate::types::{Oid, Type}; @@ -25,19 +25,19 @@ pub enum Prepare { #[state_machine_future(transitions(ReadParameterDescription))] ReadParseComplete { client: Client, - receiver: mpsc::Receiver, + receiver: Responses, name: String, }, #[state_machine_future(transitions(ReadRowDescription))] ReadParameterDescription { client: Client, - receiver: mpsc::Receiver, + receiver: Responses, name: String, }, #[state_machine_future(transitions(GetParameterTypes, GetColumnTypes, Finished))] ReadRowDescription { client: Client, - receiver: mpsc::Receiver, + receiver: Responses, name: String, parameters: Vec, }, @@ -79,7 +79,7 @@ impl PollPrepare for Prepare { fn poll_read_parse_complete<'a>( state: &'a mut RentToOwn<'a, ReadParseComplete>, ) -> Poll { - let message = try_ready_receive!(state.receiver.poll()); + let message = try_ready!(state.receiver.poll()); let state = state.take(); match message { @@ -97,7 +97,7 @@ impl PollPrepare for Prepare { fn poll_read_parameter_description<'a>( state: &'a mut RentToOwn<'a, ReadParameterDescription>, ) -> Poll { - let message = try_ready_receive!(state.receiver.poll()); + let message = try_ready!(state.receiver.poll()); let state = state.take(); match message { @@ -115,7 +115,7 @@ impl PollPrepare for Prepare { fn poll_read_row_description<'a>( state: &'a mut RentToOwn<'a, ReadRowDescription>, ) -> Poll { - let message = try_ready_receive!(state.receiver.poll()); + let message = try_ready!(state.receiver.poll()); let state = state.take(); let columns = match message { diff --git a/tokio-postgres/src/proto/query.rs b/tokio-postgres/src/proto/query.rs index 59877f061..2d84abdee 100644 --- a/tokio-postgres/src/proto/query.rs +++ b/tokio-postgres/src/proto/query.rs @@ -1,10 +1,10 @@ -use futures::sync::mpsc; use futures::{Async, Poll, Stream}; use postgres_protocol::message::backend::Message; use std::mem; use crate::proto::client::{Client, PendingRequest}; use crate::proto::portal::Portal; +use crate::proto::responses::Responses; use crate::proto::statement::Statement; use crate::{Error, Row}; @@ -31,7 +31,7 @@ enum State { statement: T, }, ReadingResponse { - receiver: mpsc::Receiver, + receiver: Responses, statement: T, }, 
Done, @@ -73,7 +73,7 @@ where }; break Ok(Async::NotReady); } - Err(()) => unreachable!("mpsc::Receiver doesn't return errors"), + Err(e) => return Err(e), }; match message { diff --git a/tokio-postgres/src/proto/responses.rs b/tokio-postgres/src/proto/responses.rs new file mode 100644 index 000000000..7cc259a83 --- /dev/null +++ b/tokio-postgres/src/proto/responses.rs @@ -0,0 +1,42 @@ +use fallible_iterator::FallibleIterator; +use futures::sync::mpsc; +use futures::{try_ready, Async, Poll, Stream}; +use postgres_protocol::message::backend; + +use crate::proto::codec::BackendMessages; +use crate::Error; + +pub fn channel() -> (mpsc::Sender, Responses) { + let (sender, receiver) = mpsc::channel(1); + + ( + sender, + Responses { + receiver, + cur: BackendMessages::empty(), + }, + ) +} + +pub struct Responses { + receiver: mpsc::Receiver, + cur: BackendMessages, +} + +impl Stream for Responses { + type Item = backend::Message; + type Error = Error; + + fn poll(&mut self) -> Poll, Error> { + loop { + if let Some(message) = self.cur.next().map_err(Error::parse)? { + return Ok(Async::Ready(Some(message))); + } + + match try_ready!(self.receiver.poll().map_err(|()| Error::closed())) { + Some(messages) => self.cur = messages, + None => return Ok(Async::Ready(None)), + } + } + } +} diff --git a/tokio-postgres/src/proto/simple_query.rs b/tokio-postgres/src/proto/simple_query.rs index 71f458a84..fdfb52270 100644 --- a/tokio-postgres/src/proto/simple_query.rs +++ b/tokio-postgres/src/proto/simple_query.rs @@ -1,11 +1,11 @@ use fallible_iterator::FallibleIterator; -use futures::sync::mpsc; use futures::{Async, Poll, Stream}; use postgres_protocol::message::backend::Message; use std::mem; use std::sync::Arc; use crate::proto::client::{Client, PendingRequest}; +use crate::proto::responses::Responses; use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; pub enum State { @@ -15,7 +15,7 @@ pub enum State { }, ReadResponse { columns: Option>, - receiver: mpsc::Receiver, + receiver: Responses, }, Done, } @@ -46,7 +46,7 @@ impl Stream for SimpleQueryStream { self.0 = State::ReadResponse { columns, receiver }; return Ok(Async::NotReady); } - Err(()) => unreachable!("mpsc receiver can't panic"), + Err(e) => return Err(e), }; match message { From 3f264027c9cacf32d6e75479a87b47a1322d1212 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Jun 2019 12:32:46 -0700 Subject: [PATCH 176/819] Fix some typos --- postgres/src/client.rs | 2 ++ tokio-postgres/src/types/mod.rs | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 64d14eeb3..c811e74d1 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -148,6 +148,7 @@ impl Client { /// } /// # Ok(()) /// # } + /// ``` pub fn query_iter( &mut self, query: &T, @@ -213,6 +214,7 @@ impl Client { /// } /// # Ok(()) /// # } + /// ``` pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { self.0.prepare_typed(query, types).wait() } diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index a13e59bfa..6dc2692a5 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -297,8 +297,7 @@ pub trait FromSql<'a>: Sized { /// The caller of this method is responsible for ensuring that this type /// is compatible with the Postgres `Type`. /// - /// The default implementation returns - /// `Err(Box::new(WasNull))`. + /// The default implementation returns `Err(Box::new(WasNull))`. 
#[allow(unused_variables)] fn from_sql_null(ty: &Type) -> Result> { Err(Box::new(WasNull)) @@ -543,7 +542,7 @@ pub enum IsNull { /// # Arrays /// /// `ToSql` is implemented for `Vec` and `&[T]` where `T` implements `ToSql`, -/// and corresponds to one-dimentional Postgres arrays with an index offset of 1. +/// and corresponds to one-dimensional Postgres arrays with an index offset of 1. pub trait ToSql: fmt::Debug { /// Converts the value of `self` into the binary format of the specified /// Postgres `Type`, appending it to `out`. From e760d82f64fc29af39a725bf50089d54afd1937b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Jun 2019 16:07:56 -0700 Subject: [PATCH 177/819] Drop tokio- prefix from TLS crates They're way too wordy and used with both tokio-postgres and postgres anyway. --- Cargo.toml | 4 ++-- .../CHANGELOG.md | 0 {tokio-postgres-native-tls => postgres-native-tls}/Cargo.toml | 2 +- .../LICENSE-APACHE | 0 .../LICENSE-MIT | 0 {tokio-postgres-native-tls => postgres-native-tls}/src/lib.rs | 4 ++-- .../src/test.rs | 0 {tokio-postgres-openssl => postgres-openssl}/CHANGELOG.md | 0 {tokio-postgres-openssl => postgres-openssl}/Cargo.toml | 2 +- {tokio-postgres-openssl => postgres-openssl}/LICENSE-APACHE | 0 {tokio-postgres-openssl => postgres-openssl}/LICENSE-MIT | 0 {tokio-postgres-openssl => postgres-openssl}/src/lib.rs | 4 ++-- {tokio-postgres-openssl => postgres-openssl}/src/test.rs | 0 tokio-postgres/src/lib.rs | 4 ++-- 14 files changed, 10 insertions(+), 10 deletions(-) rename {tokio-postgres-native-tls => postgres-native-tls}/CHANGELOG.md (100%) rename {tokio-postgres-native-tls => postgres-native-tls}/Cargo.toml (95%) rename {tokio-postgres-native-tls => postgres-native-tls}/LICENSE-APACHE (100%) rename {tokio-postgres-native-tls => postgres-native-tls}/LICENSE-MIT (100%) rename {tokio-postgres-native-tls => postgres-native-tls}/src/lib.rs (97%) rename {tokio-postgres-native-tls => postgres-native-tls}/src/test.rs (100%) rename {tokio-postgres-openssl => postgres-openssl}/CHANGELOG.md (100%) rename {tokio-postgres-openssl => postgres-openssl}/Cargo.toml (95%) rename {tokio-postgres-openssl => postgres-openssl}/LICENSE-APACHE (100%) rename {tokio-postgres-openssl => postgres-openssl}/LICENSE-MIT (100%) rename {tokio-postgres-openssl => postgres-openssl}/src/lib.rs (98%) rename {tokio-postgres-openssl => postgres-openssl}/src/test.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 37421ba99..cf7f99ca2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,10 +2,10 @@ members = [ "codegen", "postgres", + "postgres-native-tls", + "postgres-openssl", "postgres-protocol", "tokio-postgres", - "tokio-postgres-native-tls", - "tokio-postgres-openssl", ] [profile.release] diff --git a/tokio-postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md similarity index 100% rename from tokio-postgres-native-tls/CHANGELOG.md rename to postgres-native-tls/CHANGELOG.md diff --git a/tokio-postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml similarity index 95% rename from tokio-postgres-native-tls/Cargo.toml rename to postgres-native-tls/Cargo.toml index ec2b91069..a63fdc42d 100644 --- a/tokio-postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "tokio-postgres-native-tls" +name = "postgres-native-tls" version = "0.1.0-rc.1" authors = ["Steven Fackler "] edition = "2018" diff --git a/tokio-postgres-native-tls/LICENSE-APACHE b/postgres-native-tls/LICENSE-APACHE similarity index 100% rename from 
tokio-postgres-native-tls/LICENSE-APACHE rename to postgres-native-tls/LICENSE-APACHE diff --git a/tokio-postgres-native-tls/LICENSE-MIT b/postgres-native-tls/LICENSE-MIT similarity index 100% rename from tokio-postgres-native-tls/LICENSE-MIT rename to postgres-native-tls/LICENSE-MIT diff --git a/tokio-postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs similarity index 97% rename from tokio-postgres-native-tls/src/lib.rs rename to postgres-native-tls/src/lib.rs index b0fcf4b0d..b269594a3 100644 --- a/tokio-postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -4,7 +4,7 @@ //! //! ```no_run //! use native_tls::{Certificate, TlsConnector}; -//! use tokio_postgres_native_tls::MakeTlsConnector; +//! use postgres_native_tls::MakeTlsConnector; //! use std::fs; //! //! # fn main() -> Result<(), Box> { @@ -27,7 +27,7 @@ //! //! ```no_run //! use native_tls::{Certificate, TlsConnector}; -//! use tokio_postgres_native_tls::MakeTlsConnector; +//! use postgres_native_tls::MakeTlsConnector; //! use std::fs; //! //! # fn main() -> Result<(), Box> { diff --git a/tokio-postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs similarity index 100% rename from tokio-postgres-native-tls/src/test.rs rename to postgres-native-tls/src/test.rs diff --git a/tokio-postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md similarity index 100% rename from tokio-postgres-openssl/CHANGELOG.md rename to postgres-openssl/CHANGELOG.md diff --git a/tokio-postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml similarity index 95% rename from tokio-postgres-openssl/Cargo.toml rename to postgres-openssl/Cargo.toml index 8e1ba0750..ca50aa421 100644 --- a/tokio-postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "tokio-postgres-openssl" +name = "postgres-openssl" version = "0.1.0-rc.1" authors = ["Steven Fackler "] edition = "2018" diff --git a/tokio-postgres-openssl/LICENSE-APACHE b/postgres-openssl/LICENSE-APACHE similarity index 100% rename from tokio-postgres-openssl/LICENSE-APACHE rename to postgres-openssl/LICENSE-APACHE diff --git a/tokio-postgres-openssl/LICENSE-MIT b/postgres-openssl/LICENSE-MIT similarity index 100% rename from tokio-postgres-openssl/LICENSE-MIT rename to postgres-openssl/LICENSE-MIT diff --git a/tokio-postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs similarity index 98% rename from tokio-postgres-openssl/src/lib.rs rename to postgres-openssl/src/lib.rs index 220987417..57486bad1 100644 --- a/tokio-postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -4,7 +4,7 @@ //! //! ```no_run //! use openssl::ssl::{SslConnector, SslMethod}; -//! use tokio_postgres_openssl::MakeTlsConnector; +//! use postgres_openssl::MakeTlsConnector; //! //! # fn main() -> Result<(), Box> { //! let mut builder = SslConnector::builder(SslMethod::tls())?; @@ -23,7 +23,7 @@ //! //! ```no_run //! use openssl::ssl::{SslConnector, SslMethod}; -//! use tokio_postgres_openssl::MakeTlsConnector; +//! use postgres_openssl::MakeTlsConnector; //! //! # fn main() -> Result<(), Box> { //! let mut builder = SslConnector::builder(SslMethod::tls())?; diff --git a/tokio-postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs similarity index 100% rename from tokio-postgres-openssl/src/test.rs rename to postgres-openssl/src/test.rs diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 78c653311..332c388d0 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -106,8 +106,8 @@ //! //! 
TLS support is implemented via external libraries. `Client::connect` and `Config::connect` take a TLS implementation //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the -//! `tokio-postgres-openssl` and `tokio-postgres-native-tls` crates provide implementations backed by the `openssl` and -//! `native-tls` crates, respectively. +//! `postgres-openssl` and `postgres-native-tls` crates provide implementations backed by the `openssl` and `native-tls` +//! crates, respectively. #![doc(html_root_url = "https://docs.rs/tokio-postgres/0.4.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] From 47d83d57a02c3b96b11ce15688764b0c320c6e93 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Jun 2019 21:06:23 -0700 Subject: [PATCH 178/819] Release postgres-protocol v0.4.1 --- postgres-protocol/CHANGELOG.md | 11 +++++++++-- postgres-protocol/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 71ca501c8..7d24a6a25 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -2,7 +2,13 @@ ## [Unreleased] -## [v0.4.0] - 2019-05-03 +## [v0.4.1] - 2019-06-29 + +### Added + +* Added `backend::Framed` to minimally parse the structure of backend messages. + +## [v0.4.0] - 2019-03-05 ### Added @@ -17,6 +23,7 @@ Look at the [release tags] for information about older releases. -[Unreleased]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.4.0...master +[Unreleased]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.4.1...master +[v0.4.1]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.4.0...postgres-protocol-v0.4.1 [v0.4.0]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.3.2...postgres-protocol-v0.4.0 [release tags]: https://github.com/sfackler/rust-postgres/releases diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 993e3a27d..e4d8a5069 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.4.0" +version = "0.4.1" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index bcdef6db1..8d2477216 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -38,7 +38,7 @@ futures = "0.1.7" log = "0.4" percent-encoding = "1.0" phf = "0.7.23" -postgres-protocol = { version = "0.4.0", path = "../postgres-protocol" } +postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } state_machine_future = "0.2" tokio-codec = "0.1" tokio-io = "0.1" From 310f0ebfc632f125c93ae7984b7343672a848b5f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Jun 2019 21:18:28 -0700 Subject: [PATCH 179/819] Release tokio-postgres v0.4.0-rc.3 --- tokio-postgres/CHANGELOG.md | 13 ++++++++++++- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/lib.rs | 2 +- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 6b833bccc..33be91bb0 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -2,6 +2,16 @@ ## [Unreleased] +## [v0.4.0-rc.3] - 2019-06-29 + +### Fixed + +* Significantly improved the performance of `query` and `copy_in`. 
+ +### Changed + +* The items of the stream passed to `copy_in` must be `'static`. + ## [v0.4.0-rc.2] - 2019-03-05 ### Fixed @@ -43,7 +53,8 @@ Look at the [release tags] for information about older releases. -[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.2...master +[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.3...master +[v0.4.0-rc.3]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.2...tokio-postgres-v0.4.0-rc.3 [v0.4.0-rc.2]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.1...tokio-postgres-v0.4.0-rc.2 [v0.4.0-rc.1]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.3.0...tokio-postgres-v0.4.0-rc.1 [release tags]: https://github.com/sfackler/rust-postgres/releases diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 8d2477216..8d2d6d00a 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.4.0-rc.2" +version = "0.4.0-rc.3" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 332c388d0..77c5f187f 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -108,7 +108,7 @@ //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the //! `postgres-openssl` and `postgres-native-tls` crates provide implementations backed by the `openssl` and `native-tls` //! crates, respectively. -#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.4.0-rc.1")] +#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.4.0-rc.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] use bytes::IntoBuf; From 4cc18be35b8aa3f77e2eccc4bb5316d90f2a4387 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Jun 2019 21:32:16 -0700 Subject: [PATCH 180/819] Release postgres-native-tls v0.2.0-rc.1 --- postgres-native-tls/CHANGELOG.md | 6 ++---- postgres-native-tls/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index 008c0eaf9..fac4dabc9 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -2,8 +2,6 @@ ## [Unreleased] -## v0.1.0-rc.1 - 2019-03-06 +## v0.2.0-rc.1 - 2019-06-29 -* Initial release. - -[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-native-tls-v0.1.0-rc.1...master +* Updated to tokio-postgres v0.4.0-rc. diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index b269594a3..a68130380 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -45,7 +45,7 @@ //! # Ok(()) //! # } //! 
``` -#![doc(html_root_url = "https://docs.rs/tokio-postgres-native-tls/0.1.0-rc.1")] +#![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.2.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] use futures::{try_ready, Async, Future, Poll}; From 6464954e077c24b075413122af3abc88f5570ee5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Jun 2019 21:39:25 -0700 Subject: [PATCH 181/819] Release postgres-openssl v0.2.0-rc.1 --- postgres-native-tls/CHANGELOG.md | 2 -- postgres-openssl/CHANGELOG.md | 8 ++------ postgres-openssl/Cargo.toml | 2 +- postgres-openssl/src/lib.rs | 2 +- 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index fac4dabc9..46fab3005 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -1,7 +1,5 @@ # Change Log -## [Unreleased] - ## v0.2.0-rc.1 - 2019-06-29 * Updated to tokio-postgres v0.4.0-rc. diff --git a/postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md index f0c20b8c5..90d09aad0 100644 --- a/postgres-openssl/CHANGELOG.md +++ b/postgres-openssl/CHANGELOG.md @@ -1,9 +1,5 @@ # Change Log -## [Unreleased] +## v0.2.0-rc.1 - 2019-03-06 -## v0.1.0-rc.1 - 2019-03-06 - -* Initial release. - -[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-openssl-v0.1.0-rc.1...master +* Updated to `tokio-postgres` v0.4.0-rc. diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index ca50aa421..a45f8acbc 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-openssl" -version = "0.1.0-rc.1" +version = "0.2.0-rc.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 57486bad1..c1999b142 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -39,7 +39,7 @@ //! # Ok(()) //! # } //! ``` -#![doc(html_root_url = "https://docs.rs/tokio-postgres-openssl/0.1.0-rc.1")] +#![doc(html_root_url = "https://docs.rs/postgres-openssl/0.2.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] use futures::{try_ready, Async, Future, Poll}; From 904c951ad69b66126c533ea7d017cde67b8436de Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 29 Jun 2019 21:43:50 -0700 Subject: [PATCH 182/819] Release postgres v0.16.0-rc.2 --- postgres-protocol/CHANGELOG.md | 9 ++------- postgres/CHANGELOG.md | 10 ++++++---- postgres/Cargo.toml | 2 +- postgres/src/lib.rs | 6 +++--- tokio-postgres/CHANGELOG.md | 12 +++--------- 5 files changed, 15 insertions(+), 24 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 7d24a6a25..170e06f8e 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,14 +1,12 @@ # Change Log -## [Unreleased] - -## [v0.4.1] - 2019-06-29 +## v0.4.1 - 2019-06-29 ### Added * Added `backend::Framed` to minimally parse the structure of backend messages. -## [v0.4.0] - 2019-03-05 +## v0.4.0 - 2019-03-05 ### Added @@ -23,7 +21,4 @@ Look at the [release tags] for information about older releases. 
-[Unreleased]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.4.1...master -[v0.4.1]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.4.0...postgres-protocol-v0.4.1 -[v0.4.0]: https://github.com/sfackler/rust-postgres/compare/postgres-protocol-v0.3.2...postgres-protocol-v0.4.0 [release tags]: https://github.com/sfackler/rust-postgres/releases diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 1bfc907dc..151251ef0 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,8 +1,12 @@ # Change Log -## [Unreleased] +## v0.16.0-rc.2 - 2019-06-29 -## [v0.16.0-rc.1] - 2019-04-06 +### Fixed + +* Documentation fixes + +## v0.16.0-rc.1 - 2019-04-06 ### Changed @@ -93,6 +97,4 @@ Look at the [release tags] for information about older releases. -[Unreleased]: https://github.com/sfackler/rust-postgres/compare/postgres-v0.16.0-rc.1...master -[v0.16.0-rc.1]: https://github.com/sfackler/rust-postgres/compare/postgres-v0.15.2...postgres-v0.16.0-rc.1 [release tags]: https://github.com/sfackler/rust-postgres/releases diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index dc9292072..5200f788b 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.16.0-rc.1" +version = "0.16.0-rc.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 67304ba83..27989cde6 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -50,9 +50,9 @@ //! //! TLS support is implemented via external libraries. `Client::connect` and `Config::connect` take a TLS implementation //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the -//! `tokio-postgres-openssl` and `tokio-postgres-native-tls` crates provide implementations backed by the `openssl` and -//! `native-tls` crates, respectively. -#![doc(html_root_url = "https://docs.rs/postgres/0.16.0-rc.1")] +//! `postgres-openssl` and `postgres-native-tls` crates provide implementations backed by the `openssl` and `native-tls` +//! crates, respectively. +#![doc(html_root_url = "https://docs.rs/postgres/0.16.0-rc.2")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] #[cfg(feature = "runtime")] diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 33be91bb0..d55f7f9af 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,8 +1,6 @@ # Change Log -## [Unreleased] - -## [v0.4.0-rc.3] - 2019-06-29 +## v0.4.0-rc.3 - 2019-06-29 ### Fixed @@ -12,13 +10,13 @@ * The items of the stream passed to `copy_in` must be `'static`. -## [v0.4.0-rc.2] - 2019-03-05 +## v0.4.0-rc.2 - 2019-03-05 ### Fixed * Fixed Cargo features to actually enable the functionality they claim to. -## [v0.4.0-rc.1] - 2019-03-05 +## v0.4.0-rc.1 - 2019-03-05 ### Changed @@ -53,8 +51,4 @@ Look at the [release tags] for information about older releases. 
-[Unreleased]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.3...master -[v0.4.0-rc.3]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.2...tokio-postgres-v0.4.0-rc.3 -[v0.4.0-rc.2]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.4.0-rc.1...tokio-postgres-v0.4.0-rc.2 -[v0.4.0-rc.1]: https://github.com/sfackler/rust-postgres/compare/tokio-postgres-v0.3.0...tokio-postgres-v0.4.0-rc.1 [release tags]: https://github.com/sfackler/rust-postgres/releases From effd4df463841986d6a3e6fa33d5ce106497100f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 5 Jul 2019 11:02:09 -0700 Subject: [PATCH 183/819] Release postgres-native-tls 0.2.0-rc.1 For real this time --- postgres-native-tls/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index a63fdc42d..7d891acd3 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-native-tls" -version = "0.1.0-rc.1" +version = "0.2.0-rc.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 8192c771c0df2cb9af8bf77f19a61473e37f1d1c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 9 Jul 2019 19:00:10 -0700 Subject: [PATCH 184/819] Add methods that take iterators of parameters The existing methods which take slices of parameters work well when directly passing a temporary slice (e.g. `c.query(s, &[&15, &"hi"])`, but becomes limiting in other contexts like when programmatically building up a query. We now offer methods which take iterators, which are significantly more flexible for these kinds of use cases. Because of the weird object safety of `ToSql`, we can't be generic over `Iterator`, but instead have to specifically work with `Iterator`. This may require a `.map()` or two but should still work fine. Closes #265 --- tokio-postgres/src/lib.rs | 74 +++++++++++++++++++ tokio-postgres/src/proto/client.rs | 57 +++++++++----- tokio-postgres/src/proto/typeinfo.rs | 17 ++++- .../src/proto/typeinfo_composite.rs | 12 ++- tokio-postgres/src/proto/typeinfo_enum.rs | 17 ++++- 5 files changed, 149 insertions(+), 28 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 77c5f187f..1aaf82020 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -197,6 +197,17 @@ impl Client { /// /// Panics if the number of parameters provided does not match the number expected. pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Execute { + self.execute_iter(statement, params.iter().cloned()) + } + + /// Like [`execute`], but takes an iterator of parameters rather than a slice. + /// + /// [`execute`]: #method.execute + pub fn execute_iter<'a, I>(&mut self, statement: &Statement, params: I) -> impls::Execute + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { impls::Execute(self.0.execute(&statement.0, params)) } @@ -206,6 +217,17 @@ impl Client { /// /// Panics if the number of parameters provided does not match the number expected. pub fn query(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Query { + self.query_iter(statement, params.iter().cloned()) + } + + /// Like [`query`], but takes an iterator of parameters rather than a slice. 
+ /// + /// [`query`]: #method.query + pub fn query_iter<'a, I>(&mut self, statement: &Statement, params: I) -> impls::Query + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { impls::Query(self.0.query(&statement.0, params)) } @@ -214,10 +236,22 @@ impl Client { /// Portals only last for the duration of the transaction in which they are created - in particular, a portal /// created outside of a transaction is immediately destroyed. Portals can only be used on the connection that /// created them. + /// /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. pub fn bind(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Bind { + self.bind_iter(statement, params.iter().cloned()) + } + + /// Like [`bind`], but takes an iterator of parameters rather than a slice. + /// + /// [`bind`]: #method.bind + pub fn bind_iter<'a, I>(&mut self, statement: &Statement, params: I) -> impls::Bind + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { impls::Bind(self.0.bind(&statement.0, next_portal(), params)) } @@ -233,6 +267,10 @@ impl Client { /// /// The data in the provided stream is passed along to the server verbatim; it is the caller's responsibility to /// ensure it uses the proper format. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. pub fn copy_in( &mut self, statement: &Statement, @@ -245,12 +283,48 @@ impl Client { ::Buf: 'static + Send, // FIXME error type? S::Error: Into>, + { + self.copy_in_iter(statement, params.iter().cloned(), stream) + } + + /// Like [`copy_in`], except that it takes an iterator of parameters rather than a slice. + /// + /// [`copy_in`]: #method.copy_in + pub fn copy_in_iter<'a, I, S>( + &mut self, + statement: &Statement, + params: I, + stream: S, + ) -> impls::CopyIn + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + S: Stream, + S::Item: IntoBuf, + ::Buf: 'static + Send, + // FIXME error type? + S::Error: Into>, { impls::CopyIn(self.0.copy_in(&statement.0, params, stream)) } /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. pub fn copy_out(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::CopyOut { + self.copy_out_iter(statement, params.iter().cloned()) + } + + /// Like [`copy_out`], except that it takes an iterator of parameters rather than a slice. 
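To illustrate the `_iter` variants this patch adds: because the iterator item type is `&dyn ToSql`, a `.map()` is typically needed to erase the concrete parameter types, as the commit message notes. The statement and parameter values below are hypothetical.

```rust
use tokio_postgres::types::ToSql;

// Parameters built up programmatically rather than written as a fixed slice.
let names: Vec<String> = vec!["alice".to_string(), "bob".to_string()];

// `iter().map(...)` yields an ExactSizeIterator of `&dyn ToSql` items,
// which is what the new `*_iter` methods accept.
let rows = client
    .query_iter(&statement, names.iter().map(|n| n as &dyn ToSql))
    .collect();
```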
+ /// + /// [`copy_out`]: #method.copy_out + pub fn copy_out_iter<'a, I>(&mut self, statement: &Statement, params: I) -> impls::CopyOut + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { impls::CopyOut(self.0.copy_out(&statement.0, params)) } diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs index 7fb070e90..46184bb9d 100644 --- a/tokio-postgres/src/proto/client.rs +++ b/tokio-postgres/src/proto/client.rs @@ -165,7 +165,11 @@ impl Client { PrepareFuture::new(self.clone(), pending, name) } - pub fn execute(&self, statement: &Statement, params: &[&dyn ToSql]) -> ExecuteFuture { + pub fn execute<'a, I>(&self, statement: &Statement, params: I) -> ExecuteFuture + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { let pending = PendingRequest( self.excecute_message(statement, params) .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), @@ -173,7 +177,11 @@ impl Client { ExecuteFuture::new(self.clone(), pending, statement.clone()) } - pub fn query(&self, statement: &Statement, params: &[&dyn ToSql]) -> QueryStream { + pub fn query<'a, I>(&self, statement: &Statement, params: I) -> QueryStream + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { let pending = PendingRequest( self.excecute_message(statement, params) .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), @@ -181,7 +189,11 @@ impl Client { QueryStream::new(self.clone(), pending, statement.clone()) } - pub fn bind(&self, statement: &Statement, name: String, params: &[&dyn ToSql]) -> BindFuture { + pub fn bind<'a, I>(&self, statement: &Statement, name: String, params: I) -> BindFuture + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { let mut buf = self.bind_message(statement, &name, params); if let Ok(ref mut buf) = buf { frontend::sync(buf); @@ -204,17 +216,14 @@ impl Client { QueryStream::new(self.clone(), pending, portal.clone()) } - pub fn copy_in( - &self, - statement: &Statement, - params: &[&dyn ToSql], - stream: S, - ) -> CopyInFuture + pub fn copy_in<'a, S, I>(&self, statement: &Statement, params: I, stream: S) -> CopyInFuture where S: Stream, S::Item: IntoBuf, ::Buf: 'static + Send, S::Error: Into>, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, { let (mut sender, receiver) = mpsc::channel(1); let pending = PendingRequest(self.excecute_message(statement, params).map(|data| { @@ -233,7 +242,11 @@ impl Client { CopyInFuture::new(self.clone(), pending, statement.clone(), stream, sender) } - pub fn copy_out(&self, statement: &Statement, params: &[&dyn ToSql]) -> CopyOutStream { + pub fn copy_out<'a, I>(&self, statement: &Statement, params: I) -> CopyOutStream + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { let pending = PendingRequest( self.excecute_message(statement, params) .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), @@ -289,12 +302,18 @@ impl Client { }); } - fn bind_message( + fn bind_message<'a, I>( &self, statement: &Statement, name: &str, - params: &[&dyn ToSql], - ) -> Result, Error> { + params: I, + ) -> Result, Error> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let params = params.into_iter(); + assert!( statement.params().len() == params.len(), "expected {} parameters but got {}", @@ -308,7 +327,7 @@ impl Client { name, statement.name(), Some(1), - params.iter().zip(statement.params()).enumerate(), + params.zip(statement.params()).enumerate(), |(idx, (param, ty)), buf| match param.to_sql_checked(ty, buf) { Ok(IsNull::No) => 
Ok(postgres_protocol::IsNull::No), Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), @@ -327,11 +346,15 @@ impl Client { } } - fn excecute_message( + fn excecute_message<'a, I>( &self, statement: &Statement, - params: &[&dyn ToSql], - ) -> Result { + params: I, + ) -> Result + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { let mut buf = self.bind_message(statement, "", params)?; frontend::execute("", 0, &mut buf).map_err(Error::parse)?; frontend::sync(&mut buf); diff --git a/tokio-postgres/src/proto/typeinfo.rs b/tokio-postgres/src/proto/typeinfo.rs index 02e35eaeb..15a657b6c 100644 --- a/tokio-postgres/src/proto/typeinfo.rs +++ b/tokio-postgres/src/proto/typeinfo.rs @@ -10,7 +10,7 @@ use crate::proto::query::QueryStream; use crate::proto::statement::Statement; use crate::proto::typeinfo_composite::TypeinfoCompositeFuture; use crate::proto::typeinfo_enum::TypeinfoEnumFuture; -use crate::types::{Kind, Oid, Type}; +use crate::types::{Kind, Oid, ToSql, Type}; const TYPEINFO_QUERY: &str = " SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid @@ -114,7 +114,10 @@ impl PollTypeinfo for Typeinfo { match state.client.typeinfo_query() { Some(statement) => transition!(QueryingTypeinfo { - future: state.client.query(&statement, &[&state.oid]).collect(), + future: state + .client + .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) + .collect(), oid: state.oid, client: state.client, }), @@ -149,7 +152,10 @@ impl PollTypeinfo for Typeinfo { }; let state = state.take(); - let future = state.client.query(&statement, &[&state.oid]).collect(); + let future = state + .client + .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) + .collect(); state.client.set_typeinfo_query(&statement); transition!(QueryingTypeinfo { future, @@ -164,7 +170,10 @@ impl PollTypeinfo for Typeinfo { let statement = try_ready!(state.future.poll()); let state = state.take(); - let future = state.client.query(&statement, &[&state.oid]).collect(); + let future = state + .client + .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) + .collect(); state.client.set_typeinfo_query(&statement); transition!(QueryingTypeinfo { future, diff --git a/tokio-postgres/src/proto/typeinfo_composite.rs b/tokio-postgres/src/proto/typeinfo_composite.rs index f424fabcb..31398389d 100644 --- a/tokio-postgres/src/proto/typeinfo_composite.rs +++ b/tokio-postgres/src/proto/typeinfo_composite.rs @@ -11,7 +11,7 @@ use crate::proto::prepare::PrepareFuture; use crate::proto::query::QueryStream; use crate::proto::statement::Statement; use crate::proto::typeinfo::TypeinfoFuture; -use crate::types::{Field, Oid}; +use crate::types::{Field, Oid, ToSql}; const TYPEINFO_COMPOSITE_QUERY: &str = " SELECT attname, atttypid @@ -59,7 +59,10 @@ impl PollTypeinfoComposite for TypeinfoComposite { match state.client.typeinfo_composite_query() { Some(statement) => transition!(QueryingCompositeFields { - future: state.client.query(&statement, &[&state.oid]).collect(), + future: state + .client + .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) + .collect(), client: state.client, }), None => transition!(PreparingTypeinfoComposite { @@ -82,7 +85,10 @@ impl PollTypeinfoComposite for TypeinfoComposite { state.client.set_typeinfo_composite_query(&statement); transition!(QueryingCompositeFields { - future: state.client.query(&statement, &[&state.oid]).collect(), + future: state + .client + .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) + .collect(), client: 
state.client, }) } diff --git a/tokio-postgres/src/proto/typeinfo_enum.rs b/tokio-postgres/src/proto/typeinfo_enum.rs index d264d3ab7..dbc391070 100644 --- a/tokio-postgres/src/proto/typeinfo_enum.rs +++ b/tokio-postgres/src/proto/typeinfo_enum.rs @@ -8,7 +8,7 @@ use crate::proto::client::Client; use crate::proto::prepare::PrepareFuture; use crate::proto::query::QueryStream; use crate::proto::statement::Statement; -use crate::types::Oid; +use crate::types::{Oid, ToSql}; const TYPEINFO_ENUM_QUERY: &str = " SELECT enumlabel @@ -58,7 +58,10 @@ impl PollTypeinfoEnum for TypeinfoEnum { match state.client.typeinfo_enum_query() { Some(statement) => transition!(QueryingEnumVariants { - future: state.client.query(&statement, &[&state.oid]).collect(), + future: state + .client + .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) + .collect(), client: state.client, }), None => transition!(PreparingTypeinfoEnum { @@ -98,7 +101,10 @@ impl PollTypeinfoEnum for TypeinfoEnum { state.client.set_typeinfo_enum_query(&statement); transition!(QueryingEnumVariants { - future: state.client.query(&statement, &[&state.oid]).collect(), + future: state + .client + .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) + .collect(), client: state.client, }) } @@ -111,7 +117,10 @@ impl PollTypeinfoEnum for TypeinfoEnum { state.client.set_typeinfo_enum_query(&statement); transition!(QueryingEnumVariants { - future: state.client.query(&statement, &[&state.oid]).collect(), + future: state + .client + .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) + .collect(), client: state.client, }) } From 22484caaca3efd7fed740ecc96f2e5bd07ddc322 Mon Sep 17 00:00:00 2001 From: est31 Date: Wed, 10 Jul 2019 22:08:56 +0200 Subject: [PATCH 185/819] Remove rustc-serialize from README --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index f0090e510..3a7cf7e2a 100644 --- a/README.md +++ b/README.md @@ -188,7 +188,7 @@ types. The driver currently supports the following conversions: - serialize::json::Json + postgres::types::Json and serde_json::Value (optional) @@ -313,10 +313,10 @@ implementations for `uuid`'s `Uuid` type. Requires `uuid` version 0.5. ### JSON/JSONB types [JSON and JSONB](http://www.postgresql.org/docs/9.4/static/datatype-json.html) -support is provided optionally by the `with-rustc-serialize` feature, which adds -`ToSql` and `FromSql` implementations for `rustc-serialize`'s `Json` type, and -the `with-serde_json` feature, which adds implementations for `serde_json`'s -`Value` type. Requires `serde_json` version 1.0, `rustc-serialize` version 0.3. +support is provided optionally by the `with-serde_json-1` feature, which adds +`ToSql` and `FromSql` implementations for `serde_json`'s `Value` type, +as well as adding a generic `Json` type with those same implementations. +Requires `serde_json` version 1.0. 
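A brief sketch of the usage described in the paragraph above, assuming the `with-serde_json-1` feature and a synchronous `postgres` client; the `docs` table and `Doc` type are hypothetical.

```rust
use postgres::types::Json;
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Doc {
    active: bool,
}

// `serde_json::Value` has ToSql/FromSql implementations directly...
let value = serde_json::json!({ "active": true });
client.execute("INSERT INTO docs (body) VALUES ($1)", &[&value])?;

// ...and the generic `Json<T>` wrapper covers any Serialize/Deserialize type.
client.execute("INSERT INTO docs (body) VALUES ($1)", &[&Json(Doc { active: true })])?;
```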
### TIMESTAMP/TIMESTAMPTZ/DATE/TIME types From c0fcf34eb4877c2141ea9c823835db79c9153958 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Jul 2019 14:05:07 -0700 Subject: [PATCH 186/819] Reexport macros from tokio-postgres Closes #463 --- postgres/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 27989cde6..a96eb1684 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -62,7 +62,9 @@ use tokio::runtime::{self, Runtime}; #[cfg(feature = "runtime")] pub use tokio_postgres::Socket; -pub use tokio_postgres::{error, row, tls, types, Column, Portal, SimpleQueryMessage, Statement}; +pub use tokio_postgres::{ + accepts, error, row, tls, to_sql_checked, types, Column, Portal, SimpleQueryMessage, Statement, +}; pub use crate::client::*; #[cfg(feature = "runtime")] From d91f9d8407085167f88098eff2f49bd4c1d99f88 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Jul 2019 16:09:45 -0700 Subject: [PATCH 187/819] gut tokio-postgres --- tokio-postgres/Cargo.toml | 18 +- tokio-postgres/src/config.rs | 817 ------------------ tokio-postgres/src/impls.rs | 235 ----- tokio-postgres/src/lib.rs | 429 +-------- tokio-postgres/src/proto/bind.rs | 73 -- tokio-postgres/src/proto/cancel_query.rs | 109 --- tokio-postgres/src/proto/cancel_query_raw.rs | 86 -- tokio-postgres/src/proto/client.rs | 376 -------- tokio-postgres/src/proto/codec.rs | 99 --- tokio-postgres/src/proto/connect.rs | 107 --- tokio-postgres/src/proto/connect_once.rs | 121 --- tokio-postgres/src/proto/connect_raw.rs | 442 ---------- tokio-postgres/src/proto/connect_socket.rs | 198 ----- tokio-postgres/src/proto/connection.rs | 321 ------- tokio-postgres/src/proto/copy_in.rs | 270 ------ tokio-postgres/src/proto/copy_out.rs | 105 --- tokio-postgres/src/proto/execute.rs | 68 -- tokio-postgres/src/proto/idle.rs | 47 - tokio-postgres/src/proto/maybe_tls_stream.rs | 88 -- tokio-postgres/src/proto/mod.rs | 66 -- tokio-postgres/src/proto/portal.rs | 39 - tokio-postgres/src/proto/prepare.rs | 228 ----- tokio-postgres/src/proto/query.rs | 121 --- tokio-postgres/src/proto/responses.rs | 42 - tokio-postgres/src/proto/simple_query.rs | 113 --- tokio-postgres/src/proto/statement.rs | 51 -- tokio-postgres/src/proto/tls.rs | 118 --- tokio-postgres/src/proto/transaction.rs | 108 --- tokio-postgres/src/proto/typeinfo.rs | 336 ------- .../src/proto/typeinfo_composite.rs | 150 ---- tokio-postgres/src/proto/typeinfo_enum.rs | 147 ---- tokio-postgres/src/row.rs | 243 ------ tokio-postgres/src/socket.rs | 101 --- tokio-postgres/src/stmt.rs | 24 - tokio-postgres/src/tls.rs | 141 --- 35 files changed, 9 insertions(+), 6028 deletions(-) delete mode 100644 tokio-postgres/src/config.rs delete mode 100644 tokio-postgres/src/impls.rs delete mode 100644 tokio-postgres/src/proto/bind.rs delete mode 100644 tokio-postgres/src/proto/cancel_query.rs delete mode 100644 tokio-postgres/src/proto/cancel_query_raw.rs delete mode 100644 tokio-postgres/src/proto/client.rs delete mode 100644 tokio-postgres/src/proto/codec.rs delete mode 100644 tokio-postgres/src/proto/connect.rs delete mode 100644 tokio-postgres/src/proto/connect_once.rs delete mode 100644 tokio-postgres/src/proto/connect_raw.rs delete mode 100644 tokio-postgres/src/proto/connect_socket.rs delete mode 100644 tokio-postgres/src/proto/connection.rs delete mode 100644 tokio-postgres/src/proto/copy_in.rs delete mode 100644 tokio-postgres/src/proto/copy_out.rs delete mode 100644 tokio-postgres/src/proto/execute.rs delete mode 
100644 tokio-postgres/src/proto/idle.rs delete mode 100644 tokio-postgres/src/proto/maybe_tls_stream.rs delete mode 100644 tokio-postgres/src/proto/mod.rs delete mode 100644 tokio-postgres/src/proto/portal.rs delete mode 100644 tokio-postgres/src/proto/prepare.rs delete mode 100644 tokio-postgres/src/proto/query.rs delete mode 100644 tokio-postgres/src/proto/responses.rs delete mode 100644 tokio-postgres/src/proto/simple_query.rs delete mode 100644 tokio-postgres/src/proto/statement.rs delete mode 100644 tokio-postgres/src/proto/tls.rs delete mode 100644 tokio-postgres/src/proto/transaction.rs delete mode 100644 tokio-postgres/src/proto/typeinfo.rs delete mode 100644 tokio-postgres/src/proto/typeinfo_composite.rs delete mode 100644 tokio-postgres/src/proto/typeinfo_enum.rs delete mode 100644 tokio-postgres/src/row.rs delete mode 100644 tokio-postgres/src/socket.rs delete mode 100644 tokio-postgres/src/stmt.rs delete mode 100644 tokio-postgres/src/tls.rs diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 8d2d6d00a..f7a5cc1ce 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -21,7 +21,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio-tcp", "tokio-timer", "tokio-uds", "futures-cpupool", "lazy_static"] +runtime = ["tokio-tcp", "tokio-timer", "tokio-uds", "tokio-threadpool", "lazy_static"] "with-bit-vec-0_5" = ["bit-vec-05"] "with-chrono-0_4" = ["chrono-04"] @@ -34,19 +34,17 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] antidote = "1.0" bytes = "0.4" fallible-iterator = "0.2" -futures = "0.1.7" log = "0.4" percent-encoding = "1.0" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } -state_machine_future = "0.2" -tokio-codec = "0.1" -tokio-io = "0.1" +tokio-codec = { git = "https://github.com/tokio-rs/tokio" } +tokio-io = { git = "https://github.com/tokio-rs/tokio" } -tokio-tcp = { version = "0.1", optional = true } -futures-cpupool = { version = "0.1", optional = true } +tokio-tcp = { git = "https://github.com/tokio-rs/tokio", optional = true } +tokio-threadpool = { git = "https://github.com/tokio-rs/tokio", optional = true } lazy_static = { version = "1.0", optional = true } -tokio-timer = { version = "0.2", optional = true } +tokio-timer = { git = "https://github.com/tokio-rs/tokio", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } @@ -57,8 +55,8 @@ serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } [target.'cfg(unix)'.dependencies] -tokio-uds = { version = "0.2", optional = true } +tokio-uds = { git = "https://github.com/tokio-rs/tokio", optional = true } [dev-dependencies] -tokio = "0.1.7" +tokio = { git = "https://github.com/tokio-rs/tokio" } env_logger = "0.5" diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs deleted file mode 100644 index 96c6cfca3..000000000 --- a/tokio-postgres/src/config.rs +++ /dev/null @@ -1,817 +0,0 @@ -//! Connection configuration. 
- -use std::borrow::Cow; -use std::error; -#[cfg(unix)] -use std::ffi::OsStr; -use std::fmt; -use std::iter; -use std::mem; -#[cfg(unix)] -use std::os::unix::ffi::OsStrExt; -#[cfg(unix)] -use std::path::{Path, PathBuf}; -use std::str::{self, FromStr}; -use std::sync::Arc; -use std::time::Duration; -use tokio_io::{AsyncRead, AsyncWrite}; - -#[cfg(feature = "runtime")] -use crate::impls::Connect; -use crate::impls::ConnectRaw; -#[cfg(feature = "runtime")] -use crate::proto::ConnectFuture; -use crate::proto::ConnectRawFuture; -use crate::{Error, TlsConnect}; -#[cfg(feature = "runtime")] -use crate::{MakeTlsConnect, Socket}; - -/// Properties required of a session. -#[derive(Debug, Copy, Clone, PartialEq)] -pub enum TargetSessionAttrs { - /// No special properties are required. - Any, - /// The session must allow writes. - ReadWrite, - #[doc(hidden)] - __NonExhaustive, -} - -/// TLS configuration. -#[derive(Debug, Copy, Clone, PartialEq)] -pub enum SslMode { - /// Do not use TLS. - Disable, - /// Attempt to connect with TLS but allow sessions without. - Prefer, - /// Require the use of TLS. - Require, - #[doc(hidden)] - __NonExhaustive, -} - -#[derive(Debug, Clone, PartialEq)] -pub(crate) enum Host { - Tcp(String), - #[cfg(unix)] - Unix(PathBuf), -} - -#[derive(Clone, PartialEq)] -pub(crate) struct Inner { - pub(crate) user: Option, - pub(crate) password: Option>, - pub(crate) dbname: Option, - pub(crate) options: Option, - pub(crate) application_name: Option, - pub(crate) ssl_mode: SslMode, - pub(crate) host: Vec, - pub(crate) port: Vec, - pub(crate) connect_timeout: Option, - pub(crate) keepalives: bool, - pub(crate) keepalives_idle: Duration, - pub(crate) target_session_attrs: TargetSessionAttrs, -} - -/// Connection configuration. -/// -/// Configuration can be parsed from libpq-style connection strings. These strings come in two formats: -/// -/// # Key-Value -/// -/// This format consists of space-separated key-value pairs. Values which are either the empty string or contain -/// whitespace should be wrapped in `'`. `'` and `\` characters should be backslash-escaped. -/// -/// ## Keys -/// -/// * `user` - The username to authenticate with. Required. -/// * `password` - The password to authenticate with. -/// * `dbname` - The name of the database to connect to. Defaults to the username. -/// * `options` - Command line options used to configure the server. -/// * `application_name` - Sets the `application_name` parameter on the server. -/// * `sslmode` - Controls usage of TLS. If set to `disable`, TLS will not be used. If set to `prefer`, TLS will be used -/// if available, but not used otherwise. If set to `require`, TLS will be forced to be used. Defaults to `prefer`. -/// * `host` - The host to connect to. On Unix platforms, if the host starts with a `/` character it is treated as the -/// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts -/// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting -/// with the `connect` method. -/// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be -/// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if -/// omitted or the empty string. -/// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. 
Note that hostnames -/// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. -/// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. -/// This option is ignored when connecting with Unix sockets. Defaults to on. -/// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. -/// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. -/// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that -/// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server -/// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. -/// -/// ## Examples -/// -/// ```not_rust -/// host=localhost user=postgres connect_timeout=10 keepalives=0 -/// ``` -/// -/// ```not_rust -/// host=/var/lib/postgresql,localhost port=1234 user=postgres password='password with spaces' -/// ``` -/// -/// ```not_rust -/// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write -/// ``` -/// -/// # Url -/// -/// This format resembles a URL with a scheme of either `postgres://` or `postgresql://`. All components are optional, -/// and the format accept query parameters for all of the key-value pairs described in the section above. Multiple -/// host/port pairs can be comma-separated. Unix socket paths in the host section of the URL should be percent-encoded, -/// as the path component of the URL specifies the database name. -/// -/// ## Examples -/// -/// ```not_rust -/// postgresql://user@localhost -/// ``` -/// -/// ```not_rust -/// postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10 -/// ``` -/// -/// ```not_rust -/// postgresql://user@host1:1234,host2,host3:5678?target_session_attrs=read-write -/// ``` -/// -/// ```not_rust -/// postgresql:///mydb?user=user&host=/var/lib/postgresql -/// ``` -#[derive(Clone, PartialEq)] -pub struct Config(pub(crate) Arc); - -impl Default for Config { - fn default() -> Config { - Config::new() - } -} - -impl Config { - /// Creates a new configuration. - pub fn new() -> Config { - Config(Arc::new(Inner { - user: None, - password: None, - dbname: None, - options: None, - application_name: None, - ssl_mode: SslMode::Prefer, - host: vec![], - port: vec![], - connect_timeout: None, - keepalives: true, - keepalives_idle: Duration::from_secs(2 * 60 * 60), - target_session_attrs: TargetSessionAttrs::Any, - })) - } - - /// Sets the user to authenticate with. - /// - /// Required. - pub fn user(&mut self, user: &str) -> &mut Config { - Arc::make_mut(&mut self.0).user = Some(user.to_string()); - self - } - - /// Sets the password to authenticate with. - pub fn password(&mut self, password: T) -> &mut Config - where - T: AsRef<[u8]>, - { - Arc::make_mut(&mut self.0).password = Some(password.as_ref().to_vec()); - self - } - - /// Sets the name of the database to connect to. - /// - /// Defaults to the user. - pub fn dbname(&mut self, dbname: &str) -> &mut Config { - Arc::make_mut(&mut self.0).dbname = Some(dbname.to_string()); - self - } - - /// Sets command line options used to configure the server. - pub fn options(&mut self, options: &str) -> &mut Config { - Arc::make_mut(&mut self.0).options = Some(options.to_string()); - self - } - - /// Sets the value of the `application_name` runtime parameter. 
- pub fn application_name(&mut self, application_name: &str) -> &mut Config { - Arc::make_mut(&mut self.0).application_name = Some(application_name.to_string()); - self - } - - /// Sets the SSL configuration. - /// - /// Defaults to `prefer`. - pub fn ssl_mode(&mut self, ssl_mode: SslMode) -> &mut Config { - Arc::make_mut(&mut self.0).ssl_mode = ssl_mode; - self - } - - /// Adds a host to the configuration. - /// - /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix - /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. - pub fn host(&mut self, host: &str) -> &mut Config { - #[cfg(unix)] - { - if host.starts_with('/') { - return self.host_path(host); - } - } - - Arc::make_mut(&mut self.0) - .host - .push(Host::Tcp(host.to_string())); - self - } - - /// Adds a Unix socket host to the configuration. - /// - /// Unlike `host`, this method allows non-UTF8 paths. - #[cfg(unix)] - pub fn host_path(&mut self, host: T) -> &mut Config - where - T: AsRef, - { - Arc::make_mut(&mut self.0) - .host - .push(Host::Unix(host.as_ref().to_path_buf())); - self - } - - /// Adds a port to the configuration. - /// - /// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which - /// case the default of 5432 is used, a single port, in which it is used for all hosts, or the same number of ports - /// as hosts. - pub fn port(&mut self, port: u16) -> &mut Config { - Arc::make_mut(&mut self.0).port.push(port); - self - } - - /// Sets the timeout applied to socket-level connection attempts. - /// - /// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each - /// host separately. Defaults to no limit. - pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { - Arc::make_mut(&mut self.0).connect_timeout = Some(connect_timeout); - self - } - - /// Controls the use of TCP keepalive. - /// - /// This is ignored for Unix domain socket connections. Defaults to `true`. - pub fn keepalives(&mut self, keepalives: bool) -> &mut Config { - Arc::make_mut(&mut self.0).keepalives = keepalives; - self - } - - /// Sets the amount of idle time before a keepalive packet is sent on the connection. - /// - /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. - pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { - Arc::make_mut(&mut self.0).keepalives_idle = keepalives_idle; - self - } - - /// Sets the requirements of the session. - /// - /// This can be used to connect to the primary server in a clustered database rather than one of the read-only - /// secondary servers. Defaults to `Any`. 
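For reference, a hedged sketch of how the builder methods in this removed `config.rs` were used together; the module path, hosts, credentials, and timeouts here are hypothetical values for illustration only.

```rust
use std::time::Duration;
use tokio_postgres::config::{Config, TargetSessionAttrs};

// Each setter returns `&mut Config`, so calls can be chained.
let mut config = Config::new();
config
    .user("postgres")
    .password("secret")
    .dbname("mydb")
    .host("host1")
    .host("host2")
    .port(5432)
    .connect_timeout(Duration::from_secs(10))
    .keepalives_idle(Duration::from_secs(30 * 60))
    .target_session_attrs(TargetSessionAttrs::ReadWrite);
```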
- pub fn target_session_attrs( - &mut self, - target_session_attrs: TargetSessionAttrs, - ) -> &mut Config { - Arc::make_mut(&mut self.0).target_session_attrs = target_session_attrs; - self - } - - fn param(&mut self, key: &str, value: &str) -> Result<(), Error> { - match key { - "user" => { - self.user(&value); - } - "password" => { - self.password(value); - } - "dbname" => { - self.dbname(&value); - } - "options" => { - self.options(&value); - } - "application_name" => { - self.application_name(&value); - } - "sslmode" => { - let mode = match value { - "disable" => SslMode::Disable, - "prefer" => SslMode::Prefer, - "require" => SslMode::Require, - _ => return Err(Error::config_parse(Box::new(InvalidValue("sslmode")))), - }; - self.ssl_mode(mode); - } - "host" => { - for host in value.split(',') { - self.host(host); - } - } - "port" => { - for port in value.split(',') { - let port = if port.is_empty() { - 5432 - } else { - port.parse() - .map_err(|_| Error::config_parse(Box::new(InvalidValue("port"))))? - }; - self.port(port); - } - } - "connect_timeout" => { - let timeout = value - .parse::() - .map_err(|_| Error::config_parse(Box::new(InvalidValue("connect_timeout"))))?; - if timeout > 0 { - self.connect_timeout(Duration::from_secs(timeout as u64)); - } - } - "keepalives" => { - let keepalives = value - .parse::() - .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives"))))?; - self.keepalives(keepalives != 0); - } - "keepalives_idle" => { - let keepalives_idle = value - .parse::() - .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives_idle"))))?; - if keepalives_idle > 0 { - self.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); - } - } - "target_session_attrs" => { - let target_session_attrs = match &*value { - "any" => TargetSessionAttrs::Any, - "read-write" => TargetSessionAttrs::ReadWrite, - _ => { - return Err(Error::config_parse(Box::new(InvalidValue( - "target_session_attrs", - )))); - } - }; - self.target_session_attrs(target_session_attrs); - } - key => { - return Err(Error::config_parse(Box::new(UnknownOption( - key.to_string(), - )))); - } - } - - Ok(()) - } - - /// Opens a connection to a PostgreSQL database. - /// - /// Requires the `runtime` Cargo feature (enabled by default). - #[cfg(feature = "runtime")] - pub fn connect(&self, tls: T) -> Connect - where - T: MakeTlsConnect, - { - Connect(ConnectFuture::new(tls, Ok(self.clone()))) - } - - /// Connects to a PostgreSQL database over an arbitrary stream. - /// - /// All of the settings other than `user`, `password`, `dbname`, `options`, and `application` name are ignored. - pub fn connect_raw(&self, stream: S, tls: T) -> ConnectRaw - where - S: AsyncRead + AsyncWrite, - T: TlsConnect, - { - ConnectRaw(ConnectRawFuture::new(stream, tls, self.clone(), None)) - } -} - -impl FromStr for Config { - type Err = Error; - - fn from_str(s: &str) -> Result { - match UrlParser::parse(s)? 
{ - Some(config) => Ok(config), - None => Parser::parse(s), - } - } -} - -// Omit password from debug output -impl fmt::Debug for Config { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - struct Redaction {} - impl fmt::Debug for Redaction { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "_") - } - } - - f.debug_struct("Config") - .field("user", &self.0.user) - .field("password", &self.0.password.as_ref().map(|_| Redaction {})) - .field("dbname", &self.0.dbname) - .field("options", &self.0.options) - .field("application_name", &self.0.application_name) - .field("ssl_mode", &self.0.ssl_mode) - .field("host", &self.0.host) - .field("port", &self.0.port) - .field("connect_timeout", &self.0.connect_timeout) - .field("keepalives", &self.0.keepalives) - .field("keepalives_idle", &self.0.keepalives_idle) - .field("target_session_attrs", &self.0.target_session_attrs) - .finish() - } -} - -#[derive(Debug)] -struct UnknownOption(String); - -impl fmt::Display for UnknownOption { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "unknown option `{}`", self.0) - } -} - -impl error::Error for UnknownOption {} - -#[derive(Debug)] -struct InvalidValue(&'static str); - -impl fmt::Display for InvalidValue { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "invalid value for option `{}`", self.0) - } -} - -impl error::Error for InvalidValue {} - -struct Parser<'a> { - s: &'a str, - it: iter::Peekable>, -} - -impl<'a> Parser<'a> { - fn parse(s: &'a str) -> Result { - let mut parser = Parser { - s, - it: s.char_indices().peekable(), - }; - - let mut config = Config::new(); - - while let Some((key, value)) = parser.parameter()? { - config.param(key, &value)?; - } - - Ok(config) - } - - fn skip_ws(&mut self) { - self.take_while(char::is_whitespace); - } - - fn take_while(&mut self, f: F) -> &'a str - where - F: Fn(char) -> bool, - { - let start = match self.it.peek() { - Some(&(i, _)) => i, - None => return "", - }; - - loop { - match self.it.peek() { - Some(&(_, c)) if f(c) => { - self.it.next(); - } - Some(&(i, _)) => return &self.s[start..i], - None => return &self.s[start..], - } - } - } - - fn eat(&mut self, target: char) -> Result<(), Error> { - match self.it.next() { - Some((_, c)) if c == target => Ok(()), - Some((i, c)) => { - let m = format!( - "unexpected character at byte {}: expected `{}` but got `{}`", - i, target, c - ); - Err(Error::config_parse(m.into())) - } - None => Err(Error::config_parse("unexpected EOF".into())), - } - } - - fn eat_if(&mut self, target: char) -> bool { - match self.it.peek() { - Some(&(_, c)) if c == target => { - self.it.next(); - true - } - _ => false, - } - } - - fn keyword(&mut self) -> Option<&'a str> { - let s = self.take_while(|c| match c { - c if c.is_whitespace() => false, - '=' => false, - _ => true, - }); - - if s.is_empty() { - None - } else { - Some(s) - } - } - - fn value(&mut self) -> Result { - let value = if self.eat_if('\'') { - let value = self.quoted_value()?; - self.eat('\'')?; - value - } else { - self.simple_value()? 
- }; - - Ok(value) - } - - fn simple_value(&mut self) -> Result { - let mut value = String::new(); - - while let Some(&(_, c)) = self.it.peek() { - if c.is_whitespace() { - break; - } - - self.it.next(); - if c == '\\' { - if let Some((_, c2)) = self.it.next() { - value.push(c2); - } - } else { - value.push(c); - } - } - - if value.is_empty() { - return Err(Error::config_parse("unexpected EOF".into())); - } - - Ok(value) - } - - fn quoted_value(&mut self) -> Result { - let mut value = String::new(); - - while let Some(&(_, c)) = self.it.peek() { - if c == '\'' { - return Ok(value); - } - - self.it.next(); - if c == '\\' { - if let Some((_, c2)) = self.it.next() { - value.push(c2); - } - } else { - value.push(c); - } - } - - Err(Error::config_parse( - "unterminated quoted connection parameter value".into(), - )) - } - - fn parameter(&mut self) -> Result, Error> { - self.skip_ws(); - let keyword = match self.keyword() { - Some(keyword) => keyword, - None => return Ok(None), - }; - self.skip_ws(); - self.eat('=')?; - self.skip_ws(); - let value = self.value()?; - - Ok(Some((keyword, value))) - } -} - -// This is a pretty sloppy "URL" parser, but it matches the behavior of libpq, where things really aren't very strict -struct UrlParser<'a> { - s: &'a str, - config: Config, -} - -impl<'a> UrlParser<'a> { - fn parse(s: &'a str) -> Result, Error> { - let s = match Self::remove_url_prefix(s) { - Some(s) => s, - None => return Ok(None), - }; - - let mut parser = UrlParser { - s, - config: Config::new(), - }; - - parser.parse_credentials()?; - parser.parse_host()?; - parser.parse_path()?; - parser.parse_params()?; - - Ok(Some(parser.config)) - } - - fn remove_url_prefix(s: &str) -> Option<&str> { - for prefix in &["postgres://", "postgresql://"] { - if s.starts_with(prefix) { - return Some(&s[prefix.len()..]); - } - } - - None - } - - fn take_until(&mut self, end: &[char]) -> Option<&'a str> { - match self.s.find(end) { - Some(pos) => { - let (head, tail) = self.s.split_at(pos); - self.s = tail; - Some(head) - } - None => None, - } - } - - fn take_all(&mut self) -> &'a str { - mem::replace(&mut self.s, "") - } - - fn eat_byte(&mut self) { - self.s = &self.s[1..]; - } - - fn parse_credentials(&mut self) -> Result<(), Error> { - let creds = match self.take_until(&['@']) { - Some(creds) => creds, - None => return Ok(()), - }; - self.eat_byte(); - - let mut it = creds.splitn(2, ':'); - let user = self.decode(it.next().unwrap())?; - self.config.user(&user); - - if let Some(password) = it.next() { - let password = Cow::from(percent_encoding::percent_decode(password.as_bytes())); - self.config.password(password); - } - - Ok(()) - } - - fn parse_host(&mut self) -> Result<(), Error> { - let host = match self.take_until(&['/', '?']) { - Some(host) => host, - None => self.take_all(), - }; - - if host.is_empty() { - return Ok(()); - } - - for chunk in host.split(',') { - let (host, port) = if chunk.starts_with('[') { - let idx = match chunk.find(']') { - Some(idx) => idx, - None => return Err(Error::config_parse(InvalidValue("host").into())), - }; - - let host = &chunk[1..idx]; - let remaining = &chunk[idx + 1..]; - let port = if remaining.starts_with(':') { - Some(&remaining[1..]) - } else if remaining.is_empty() { - None - } else { - return Err(Error::config_parse(InvalidValue("host").into())); - }; - - (host, port) - } else { - let mut it = chunk.splitn(2, ':'); - (it.next().unwrap(), it.next()) - }; - - self.host_param(host)?; - let port = self.decode(port.unwrap_or("5432"))?; - self.config.param("port", 
&port)?; - } - - Ok(()) - } - - fn parse_path(&mut self) -> Result<(), Error> { - if !self.s.starts_with('/') { - return Ok(()); - } - self.eat_byte(); - - let dbname = match self.take_until(&['?']) { - Some(dbname) => dbname, - None => self.take_all(), - }; - - if !dbname.is_empty() { - self.config.dbname(&self.decode(dbname)?); - } - - Ok(()) - } - - fn parse_params(&mut self) -> Result<(), Error> { - if !self.s.starts_with('?') { - return Ok(()); - } - self.eat_byte(); - - while !self.s.is_empty() { - let key = match self.take_until(&['=']) { - Some(key) => self.decode(key)?, - None => return Err(Error::config_parse("unterminated parameter".into())), - }; - self.eat_byte(); - - let value = match self.take_until(&['&']) { - Some(value) => { - self.eat_byte(); - value - } - None => self.take_all(), - }; - - if key == "host" { - self.host_param(value)?; - } else { - let value = self.decode(value)?; - self.config.param(&key, &value)?; - } - } - - Ok(()) - } - - #[cfg(unix)] - fn host_param(&mut self, s: &str) -> Result<(), Error> { - let decoded = Cow::from(percent_encoding::percent_decode(s.as_bytes())); - if decoded.get(0) == Some(&b'/') { - self.config.host_path(OsStr::from_bytes(&decoded)); - } else { - let decoded = str::from_utf8(&decoded).map_err(|e| Error::config_parse(Box::new(e)))?; - self.config.host(decoded); - } - - Ok(()) - } - - #[cfg(not(unix))] - fn host_param(&mut self, s: &str) -> Result<(), Error> { - let s = self.decode(s)?; - self.config.param("host", &s) - } - - fn decode(&self, s: &'a str) -> Result, Error> { - percent_encoding::percent_decode(s.as_bytes()) - .decode_utf8() - .map_err(|e| Error::config_parse(e.into())) - } -} diff --git a/tokio-postgres/src/impls.rs b/tokio-postgres/src/impls.rs deleted file mode 100644 index 520a4aff8..000000000 --- a/tokio-postgres/src/impls.rs +++ /dev/null @@ -1,235 +0,0 @@ -//! Futures and stream types used in the crate. -use bytes::{Bytes, IntoBuf}; -use futures::{try_ready, Async, Future, Poll, Stream}; -use std::error; -use tokio_io::{AsyncRead, AsyncWrite}; - -use crate::proto; -use crate::{Client, Connection, Error, Portal, Row, SimpleQueryMessage, Statement, TlsConnect}; -#[cfg(feature = "runtime")] -use crate::{MakeTlsConnect, Socket}; - -/// The future returned by `Client::cancel_query_raw`. -#[must_use = "futures do nothing unless polled"] -pub struct CancelQueryRaw(pub(crate) proto::CancelQueryRawFuture) -where - S: AsyncRead + AsyncWrite, - T: TlsConnect; - -impl Future for CancelQueryRaw -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll<(), Error> { - self.0.poll() - } -} - -/// The future returned by `Client::cancel_query`. -#[cfg(feature = "runtime")] -#[must_use = "futures do nothing unless polled"] -pub struct CancelQuery(pub(crate) proto::CancelQueryFuture) -where - T: MakeTlsConnect; - -#[cfg(feature = "runtime")] -impl Future for CancelQuery -where - T: MakeTlsConnect, -{ - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll<(), Error> { - self.0.poll() - } -} - -/// The future returned by `Config::connect_raw`. 
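
For reference, the parser above accepts the libpq keyword/value form ("host=localhost user=postgres connect_timeout=10"), with optional single-quoted values and backslash escapes, while the UrlParser handles "postgres://" and "postgresql://" URLs: percent-encoded credentials, comma-separated hosts, bracketed IPv6 addresses, and percent-encoded Unix socket directories beginning with "/". The standalone, std-only sketch below mirrors just the keyword/value grammar; the function and names are illustrative and not part of the crate's API.

fn split_kv(s: &str) -> Result<Vec<(String, String)>, String> {
    let mut out = Vec::new();
    let mut it = s.chars().peekable();
    loop {
        // Skip whitespace between parameters.
        while it.peek().map_or(false, |c| c.is_whitespace()) {
            it.next();
        }
        // Read the keyword, up to `=` or whitespace.
        let mut key = String::new();
        while let Some(&c) = it.peek() {
            if c == '=' || c.is_whitespace() {
                break;
            }
            key.push(c);
            it.next();
        }
        if key.is_empty() {
            return Ok(out); // end of input
        }
        while it.peek().map_or(false, |c| c.is_whitespace()) {
            it.next();
        }
        if it.next() != Some('=') {
            return Err(format!("expected `=` after `{}`", key));
        }
        while it.peek().map_or(false, |c| c.is_whitespace()) {
            it.next();
        }
        // Read the value: optionally single-quoted, with `\` escaping the next character.
        let quoted = it.peek() == Some(&'\'');
        if quoted {
            it.next();
        }
        let mut value = String::new();
        while let Some(c) = it.next() {
            match c {
                '\\' => {
                    if let Some(c2) = it.next() {
                        value.push(c2);
                    }
                }
                '\'' if quoted => break,
                c if c.is_whitespace() && !quoted => break,
                c => value.push(c),
            }
        }
        out.push((key, value));
    }
}

fn main() {
    let pairs = split_kv("host=localhost user=postgres password='p w' connect_timeout=10").unwrap();
    assert_eq!(pairs[2], ("password".to_string(), "p w".to_string()));
    println!("{:?}", pairs);
}
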
-#[must_use = "futures do nothing unless polled"] -pub struct ConnectRaw(pub(crate) proto::ConnectRawFuture) -where - S: AsyncRead + AsyncWrite, - T: TlsConnect; - -impl Future for ConnectRaw -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - type Item = (Client, Connection); - type Error = Error; - - fn poll(&mut self) -> Poll<(Client, Connection), Error> { - let (client, connection) = try_ready!(self.0.poll()); - - Ok(Async::Ready((Client(client), Connection(connection)))) - } -} - -/// The future returned by `Config::connect`. -#[cfg(feature = "runtime")] -#[must_use = "futures do nothing unless polled"] -pub struct Connect(pub(crate) proto::ConnectFuture) -where - T: MakeTlsConnect; - -#[cfg(feature = "runtime")] -impl Future for Connect -where - T: MakeTlsConnect, -{ - type Item = (Client, Connection); - type Error = Error; - - fn poll(&mut self) -> Poll<(Client, Connection), Error> { - let (client, connection) = try_ready!(self.0.poll()); - - Ok(Async::Ready((Client(client), Connection(connection)))) - } -} - -/// The future returned by `Client::prepare`. -#[must_use = "futures do nothing unless polled"] -pub struct Prepare(pub(crate) proto::PrepareFuture); - -impl Future for Prepare { - type Item = Statement; - type Error = Error; - - fn poll(&mut self) -> Poll { - let statement = try_ready!(self.0.poll()); - - Ok(Async::Ready(Statement(statement))) - } -} - -/// The future returned by `Client::query`. -#[must_use = "streams do nothing unless polled"] -pub struct Query(pub(crate) proto::QueryStream); - -impl Stream for Query { - type Item = Row; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - self.0.poll() - } -} - -/// The future returned by `Client::execute`. -#[must_use = "futures do nothing unless polled"] -pub struct Execute(pub(crate) proto::ExecuteFuture); - -impl Future for Execute { - type Item = u64; - type Error = Error; - - fn poll(&mut self) -> Poll { - self.0.poll() - } -} - -/// The future returned by `Client::bind`. -#[must_use = "futures do nothing unless polled"] -pub struct Bind(pub(crate) proto::BindFuture); - -impl Future for Bind { - type Item = Portal; - type Error = Error; - - fn poll(&mut self) -> Poll { - match self.0.poll() { - Ok(Async::Ready(portal)) => Ok(Async::Ready(Portal(portal))), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(e) => Err(e), - } - } -} - -/// The future returned by `Client::query_portal`. -#[must_use = "streams do nothing unless polled"] -pub struct QueryPortal(pub(crate) proto::QueryStream); - -impl Stream for QueryPortal { - type Item = Row; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - self.0.poll() - } -} - -/// The future returned by `Client::copy_in`. -#[must_use = "futures do nothing unless polled"] -pub struct CopyIn(pub(crate) proto::CopyInFuture) -where - S: Stream, - S::Item: IntoBuf, - ::Buf: 'static + Send, - S::Error: Into>; - -impl Future for CopyIn -where - S: Stream, - S::Item: IntoBuf, - ::Buf: Send, - S::Error: Into>, -{ - type Item = u64; - type Error = Error; - - fn poll(&mut self) -> Poll { - self.0.poll() - } -} - -/// The future returned by `Client::copy_out`. -#[must_use = "streams do nothing unless polled"] -pub struct CopyOut(pub(crate) proto::CopyOutStream); - -impl Stream for CopyOut { - type Item = Bytes; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - self.0.poll() - } -} - -/// The stream returned by `Client::simple_query`. 
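
Every future and stream in the impls module being deleted here follows the same shape: a public, #[must_use] newtype over a private proto type whose poll simply delegates and re-wraps the item in a public type. A minimal, self-contained sketch of that pattern in the same futures 0.1 style, with stand-in types rather than the crate's own:

use futures::{try_ready, Async, Future, Poll};

// Stand-in for one of the private proto::* futures.
struct InnerFuture;

impl Future for InnerFuture {
    type Item = u32;
    type Error = ();
    fn poll(&mut self) -> Poll<u32, ()> {
        Ok(Async::Ready(42))
    }
}

// Stand-in for a public wrapper type such as `Statement` or `Portal`.
pub struct PublicThing(u32);

#[must_use = "futures do nothing unless polled"]
pub struct PublicFuture(InnerFuture);

impl Future for PublicFuture {
    type Item = PublicThing;
    type Error = ();
    fn poll(&mut self) -> Poll<PublicThing, ()> {
        // Delegate to the inner future and re-wrap its item in the public type.
        let inner = try_ready!(self.0.poll());
        Ok(Async::Ready(PublicThing(inner)))
    }
}

fn main() {
    let mut fut = PublicFuture(InnerFuture);
    match fut.poll() {
        Ok(Async::Ready(PublicThing(n))) => println!("ready: {}", n),
        _ => println!("not ready or failed"),
    }
}
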
-#[must_use = "streams do nothing unless polled"] -pub struct SimpleQuery(pub(crate) proto::SimpleQueryStream); - -impl Stream for SimpleQuery { - type Item = SimpleQueryMessage; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - self.0.poll() - } -} - -/// The future returned by `TransactionBuilder::build`. -#[must_use = "futures do nothing unless polled"] -pub struct Transaction(pub(crate) proto::TransactionFuture) -where - T: Future, - T::Error: From; - -impl Future for Transaction -where - T: Future, - T::Error: From, -{ - type Item = T::Item; - type Error = T::Error; - - fn poll(&mut self) -> Poll { - self.0.poll() - } -} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 1aaf82020..14c0df497 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -110,434 +110,7 @@ //! crates, respectively. #![doc(html_root_url = "https://docs.rs/tokio-postgres/0.4.0-rc.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] +#![feature(async_await)] -use bytes::IntoBuf; -use futures::{Future, Poll, Stream}; -use std::error::Error as StdError; -use std::sync::atomic::{AtomicUsize, Ordering}; -use tokio_io::{AsyncRead, AsyncWrite}; - -pub use crate::config::Config; -use crate::error::DbError; -pub use crate::error::Error; -pub use crate::row::{Row, SimpleQueryRow}; -#[cfg(feature = "runtime")] -pub use crate::socket::Socket; -pub use crate::stmt::Column; -#[cfg(feature = "runtime")] -use crate::tls::MakeTlsConnect; -pub use crate::tls::NoTls; -use crate::tls::TlsConnect; -use crate::types::{ToSql, Type}; - -pub mod config; pub mod error; -pub mod impls; -mod proto; -pub mod row; -#[cfg(feature = "runtime")] -mod socket; -mod stmt; -pub mod tls; pub mod types; - -fn next_statement() -> String { - static ID: AtomicUsize = AtomicUsize::new(0); - format!("s{}", ID.fetch_add(1, Ordering::SeqCst)) -} - -fn next_portal() -> String { - static ID: AtomicUsize = AtomicUsize::new(0); - format!("p{}", ID.fetch_add(1, Ordering::SeqCst)) -} - -/// A convenience function which parses a connection string and connects to the database. -/// -/// See the documentation for [`Config`] for details on the connection string format. -/// -/// Requires the `runtime` Cargo feature (enabled by default). -/// -/// [`Config`]: ./Config.t.html -#[cfg(feature = "runtime")] -pub fn connect(config: &str, tls: T) -> impls::Connect -where - T: MakeTlsConnect, -{ - impls::Connect(proto::ConnectFuture::new(tls, config.parse())) -} - -/// An asynchronous PostgreSQL client. -/// -/// The client is one half of what is returned when a connection is established. Users interact with the database -/// through this client object. -pub struct Client(proto::Client); - -impl Client { - /// Creates a new prepared statement. - /// - /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), - /// which are set when executed. Prepared statements can only be used with the connection that created them. - pub fn prepare(&mut self, query: &str) -> impls::Prepare { - self.prepare_typed(query, &[]) - } - - /// Like `prepare`, but allows the types of query parameters to be explicitly specified. - /// - /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be - /// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`. 
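
Since the combinator-based API above is being removed in favor of async/await, a rough usage sketch of how it was driven may help orient the diff. This is untested and assumes the `runtime` feature, the tokio 0.1 executor, and a reachable local server; it is not taken from the crate's documentation.

use futures::{Future, Stream};
use tokio_postgres::NoTls;

fn main() {
    let fut = tokio_postgres::connect("host=localhost user=postgres", NoTls)
        .and_then(|(mut client, connection)| {
            // The connection half owns the socket; it has to be spawned (or otherwise
            // polled) or none of the client's futures will make progress.
            tokio::spawn(connection.map_err(|e| eprintln!("connection error: {}", e)));

            let prepare = client.prepare("SELECT $1::TEXT");
            prepare.and_then(move |stmt| {
                let mut client = client;
                client.query(&stmt, &[&"hello"]).collect()
            })
        })
        .map(|rows| println!("got {} row(s)", rows.len()))
        .map_err(|e| eprintln!("error: {}", e));

    tokio::run(fut);
}
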
- pub fn prepare_typed(&mut self, query: &str, param_types: &[Type]) -> impls::Prepare { - impls::Prepare(self.0.prepare(next_statement(), query, param_types)) - } - - /// Executes a statement, returning the number of rows modified. - /// - /// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - pub fn execute(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Execute { - self.execute_iter(statement, params.iter().cloned()) - } - - /// Like [`execute`], but takes an iterator of parameters rather than a slice. - /// - /// [`execute`]: #method.execute - pub fn execute_iter<'a, I>(&mut self, statement: &Statement, params: I) -> impls::Execute - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - impls::Execute(self.0.execute(&statement.0, params)) - } - - /// Executes a statement, returning a stream of the resulting rows. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - pub fn query(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Query { - self.query_iter(statement, params.iter().cloned()) - } - - /// Like [`query`], but takes an iterator of parameters rather than a slice. - /// - /// [`query`]: #method.query - pub fn query_iter<'a, I>(&mut self, statement: &Statement, params: I) -> impls::Query - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - impls::Query(self.0.query(&statement.0, params)) - } - - /// Binds a statement to a set of parameters, creating a `Portal` which can be incrementally queried. - /// - /// Portals only last for the duration of the transaction in which they are created - in particular, a portal - /// created outside of a transaction is immediately destroyed. Portals can only be used on the connection that - /// created them. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - pub fn bind(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::Bind { - self.bind_iter(statement, params.iter().cloned()) - } - - /// Like [`bind`], but takes an iterator of parameters rather than a slice. - /// - /// [`bind`]: #method.bind - pub fn bind_iter<'a, I>(&mut self, statement: &Statement, params: I) -> impls::Bind - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - impls::Bind(self.0.bind(&statement.0, next_portal(), params)) - } - - /// Continues execution of a portal, returning a stream of the resulting rows. - /// - /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to - /// query_portal. If the requested number is negative or 0, all rows will be returned. - pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> impls::QueryPortal { - impls::QueryPortal(self.0.query_portal(&portal.0, max_rows)) - } - - /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. - /// - /// The data in the provided stream is passed along to the server verbatim; it is the caller's responsibility to - /// ensure it uses the proper format. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - pub fn copy_in( - &mut self, - statement: &Statement, - params: &[&dyn ToSql], - stream: S, - ) -> impls::CopyIn - where - S: Stream, - S::Item: IntoBuf, - ::Buf: 'static + Send, - // FIXME error type? 
- S::Error: Into>, - { - self.copy_in_iter(statement, params.iter().cloned(), stream) - } - - /// Like [`copy_in`], except that it takes an iterator of parameters rather than a slice. - /// - /// [`copy_in`]: #method.copy_in - pub fn copy_in_iter<'a, I, S>( - &mut self, - statement: &Statement, - params: I, - stream: S, - ) -> impls::CopyIn - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - S: Stream, - S::Item: IntoBuf, - ::Buf: 'static + Send, - // FIXME error type? - S::Error: Into>, - { - impls::CopyIn(self.0.copy_in(&statement.0, params, stream)) - } - - /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - pub fn copy_out(&mut self, statement: &Statement, params: &[&dyn ToSql]) -> impls::CopyOut { - self.copy_out_iter(statement, params.iter().cloned()) - } - - /// Like [`copy_out`], except that it takes an iterator of parameters rather than a slice. - /// - /// [`copy_out`]: #method.copy_out - pub fn copy_out_iter<'a, I>(&mut self, statement: &Statement, params: I) -> impls::CopyOut - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - impls::CopyOut(self.0.copy_out(&statement.0, params)) - } - - /// Executes a sequence of SQL statements using the simple query protocol. - /// - /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that - /// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings, - /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a stream over the - /// rows, this method returns a stream over an enum which indicates either the completion of one of the commands, - /// or a row of data. This preserves the framing between the separate statements in the request. - /// - /// # Warning - /// - /// Prepared statements should be use for any query which contains user-specified data, as they provided the - /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass - /// them to this method! - pub fn simple_query(&mut self, query: &str) -> impls::SimpleQuery { - impls::SimpleQuery(self.0.simple_query(query)) - } - - /// A utility method to wrap a future in a database transaction. - /// - /// The returned future will start a transaction and then run the provided future. If the future returns `Ok`, it - /// will commit the transaction, and if it returns `Err`, it will roll the transaction back. - /// - /// This is simply a convenience API; it's roughly equivalent to: - /// - /// ```ignore - /// client.batch_execute("BEGIN") - /// .and_then(your_future) - /// .and_then(client.batch_execute("COMMIT")) - /// .or_else(|e| client.batch_execute("ROLLBACK").then(|_| Err(e))) - /// ``` - /// - /// # Warning - /// - /// Unlike the other futures created by a client, this future is *not* atomic with respect to other requests. If you - /// attempt to execute it concurrently with other futures created by the same connection, they will interleave! - pub fn build_transaction(&mut self) -> TransactionBuilder { - TransactionBuilder(self.0.clone()) - } - - /// Attempts to cancel an in-progress query. - /// - /// The server provides no information about whether a cancellation attempt was successful or not. An error will - /// only be returned if the client was unable to connect to the database. 
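
The cancellation path described here opens a brand-new connection and writes a single CancelRequest packet built from the process id and secret key handed out at startup; the crate builds it with postgres_protocol's frontend::cancel_request, as the proto code further down shows. For orientation, a std-only sketch of that 16-byte packet as defined by the PostgreSQL frontend/backend protocol:

// Layout per the protocol: Int32 length (16), Int32 code 80877102 (1234 << 16 | 5678),
// Int32 backend process id, Int32 secret key. There is no reply; the server signals
// the targeted backend and closes the connection.
fn cancel_request(process_id: i32, secret_key: i32) -> [u8; 16] {
    let mut buf = [0u8; 16];
    buf[0..4].copy_from_slice(&16i32.to_be_bytes());
    buf[4..8].copy_from_slice(&80_877_102i32.to_be_bytes());
    buf[8..12].copy_from_slice(&process_id.to_be_bytes());
    buf[12..16].copy_from_slice(&secret_key.to_be_bytes());
    buf
}

fn main() {
    let pkt = cancel_request(4242, 1337);
    assert_eq!(&pkt[..4], &16i32.to_be_bytes());
    println!("{:02x?}", pkt);
}
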
- /// - /// Requires the `runtime` Cargo feature (enabled by default). - #[cfg(feature = "runtime")] - pub fn cancel_query(&mut self, make_tls_mode: T) -> impls::CancelQuery - where - T: MakeTlsConnect, - { - impls::CancelQuery(self.0.cancel_query(make_tls_mode)) - } - - /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new - /// connection itself. - pub fn cancel_query_raw(&mut self, stream: S, tls_mode: T) -> impls::CancelQueryRaw - where - S: AsyncRead + AsyncWrite, - T: TlsConnect, - { - impls::CancelQueryRaw(self.0.cancel_query_raw(stream, tls_mode)) - } - - /// Determines if the connection to the server has already closed. - /// - /// In that case, all future queries will fail. - pub fn is_closed(&self) -> bool { - self.0.is_closed() - } - - /// Polls the client to check if it is idle. - /// - /// A connection is idle if there are no outstanding requests, whether they have begun being polled or not. For - /// example, this can be used by a connection pool to ensure that all work done by one checkout is done before - /// making the client available for a new request. Otherwise, any non-completed work from the first request could - /// interleave with the second. - pub fn poll_idle(&mut self) -> Poll<(), Error> { - self.0.poll_idle() - } -} - -/// A connection to a PostgreSQL database. -/// -/// This is one half of what is returned when a new connection is established. It performs the actual IO with the -/// server, and should generally be spawned off onto an executor to run in the background. -/// -/// `Connection` implements `Future`, and only resolves when the connection is closed, either because a fatal error has -/// occurred, or because its associated `Client` has dropped and all outstanding work has completed. -#[must_use = "futures do nothing unless polled"] -pub struct Connection(proto::Connection>); - -impl Connection -where - S: AsyncRead + AsyncWrite, - T: AsyncRead + AsyncWrite, -{ - /// Returns the value of a runtime parameter for this connection. - pub fn parameter(&self, name: &str) -> Option<&str> { - self.0.parameter(name) - } - - /// Polls for asynchronous messages from the server. - /// - /// The server can send notices as well as notifications asynchronously to the client. Applications which wish to - /// examine those messages should use this method to drive the connection rather than its `Future` implementation. - pub fn poll_message(&mut self) -> Poll, Error> { - self.0.poll_message() - } -} - -impl Future for Connection -where - S: AsyncRead + AsyncWrite, - T: AsyncRead + AsyncWrite, -{ - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll<(), Error> { - self.0.poll() - } -} - -/// An asynchronous message from the server. -#[allow(clippy::large_enum_variant)] -pub enum AsyncMessage { - /// A notice. - /// - /// Notices use the same format as errors, but aren't "errors" per-se. - Notice(DbError), - /// A notification. - /// - /// Connections can subscribe to notifications with the `LISTEN` command. - Notification(Notification), - #[doc(hidden)] - __NonExhaustive, -} - -/// A prepared statement. -/// -/// Prepared statements can only be used with the connection that created them. -#[derive(Clone)] -pub struct Statement(proto::Statement); - -impl Statement { - /// Returns the expected types of the statement's parameters. - pub fn params(&self) -> &[Type] { - self.0.params() - } - - /// Returns information about the columns returned when the statement is queried. 
- pub fn columns(&self) -> &[Column] { - self.0.columns() - } -} - -/// A portal. -/// -/// Portals can only be used with the connection that created them, and only exist for the duration of the transaction -/// in which they were created. -pub struct Portal(proto::Portal); - -/// A builder type which can wrap a future in a database transaction. -pub struct TransactionBuilder(proto::Client); - -impl TransactionBuilder { - /// Returns a future which wraps another in a database transaction. - pub fn build(self, future: T) -> impls::Transaction - where - T: Future, - // FIXME error type? - T::Error: From, - { - impls::Transaction(proto::TransactionFuture::new(self.0, future)) - } -} - -/// Message returned by the `SimpleQuery` stream. -pub enum SimpleQueryMessage { - /// A row of data. - Row(SimpleQueryRow), - /// A statement in the query has completed. - /// - /// The number of rows modified or selected is returned. - CommandComplete(u64), - #[doc(hidden)] - __NonExhaustive, -} - -/// An asynchronous notification. -#[derive(Clone, Debug)] -pub struct Notification { - process_id: i32, - channel: String, - payload: String, -} - -impl Notification { - /// The process ID of the notifying backend process. - pub fn process_id(&self) -> i32 { - self.process_id - } - - /// The name of the channel that the notify has been raised on. - pub fn channel(&self) -> &str { - &self.channel - } - - /// The "payload" string passed from the notifying process. - pub fn payload(&self) -> &str { - &self.payload - } -} diff --git a/tokio-postgres/src/proto/bind.rs b/tokio-postgres/src/proto/bind.rs deleted file mode 100644 index 944afd6ea..000000000 --- a/tokio-postgres/src/proto/bind.rs +++ /dev/null @@ -1,73 +0,0 @@ -use futures::{try_ready, Poll, Stream}; -use postgres_protocol::message::backend::Message; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; - -use crate::proto::client::{Client, PendingRequest}; -use crate::proto::portal::Portal; -use crate::proto::responses::Responses; -use crate::proto::statement::Statement; -use crate::Error; - -#[derive(StateMachineFuture)] -pub enum Bind { - #[state_machine_future(start, transitions(ReadBindComplete))] - Start { - client: Client, - request: PendingRequest, - name: String, - statement: Statement, - }, - #[state_machine_future(transitions(Finished))] - ReadBindComplete { - receiver: Responses, - client: Client, - name: String, - statement: Statement, - }, - #[state_machine_future(ready)] - Finished(Portal), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollBind for Bind { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); - let receiver = state.client.send(state.request)?; - - transition!(ReadBindComplete { - receiver, - client: state.client, - name: state.name, - statement: state.statement, - }) - } - - fn poll_read_bind_complete<'a>( - state: &'a mut RentToOwn<'a, ReadBindComplete>, - ) -> Poll { - let message = try_ready!(state.receiver.poll()); - let state = state.take(); - - match message { - Some(Message::BindComplete) => transition!(Finished(Portal::new( - state.client.downgrade(), - state.name, - state.statement, - ))), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } -} - -impl BindFuture { - pub fn new( - client: Client, - request: PendingRequest, - name: String, - statement: Statement, - ) -> BindFuture { - Bind::start(client, request, name, statement) - } -} diff --git a/tokio-postgres/src/proto/cancel_query.rs 
b/tokio-postgres/src/proto/cancel_query.rs deleted file mode 100644 index 909fe6548..000000000 --- a/tokio-postgres/src/proto/cancel_query.rs +++ /dev/null @@ -1,109 +0,0 @@ -use futures::{try_ready, Future, Poll}; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::io; - -use crate::config::{Host, SslMode}; -use crate::proto::{CancelQueryRawFuture, ConnectSocketFuture}; -use crate::{Config, Error, MakeTlsConnect, Socket}; - -#[derive(StateMachineFuture)] -pub enum CancelQuery -where - T: MakeTlsConnect, -{ - #[state_machine_future(start, transitions(ConnectingSocket))] - Start { - tls: T, - idx: Option, - config: Config, - process_id: i32, - secret_key: i32, - }, - #[state_machine_future(transitions(Canceling))] - ConnectingSocket { - future: ConnectSocketFuture, - mode: SslMode, - tls: T::TlsConnect, - process_id: i32, - secret_key: i32, - }, - #[state_machine_future(transitions(Finished))] - Canceling { - future: CancelQueryRawFuture, - }, - #[state_machine_future(ready)] - Finished(()), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollCancelQuery for CancelQuery -where - T: MakeTlsConnect, -{ - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let mut state = state.take(); - - let idx = state.idx.ok_or_else(|| { - Error::connect(io::Error::new(io::ErrorKind::InvalidInput, "unknown host")) - })?; - - let hostname = match &state.config.0.host[idx] { - Host::Tcp(host) => &**host, - // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter - #[cfg(unix)] - Host::Unix(_) => "", - }; - let tls = state - .tls - .make_tls_connect(hostname) - .map_err(|e| Error::tls(e.into()))?; - - transition!(ConnectingSocket { - mode: state.config.0.ssl_mode, - future: ConnectSocketFuture::new(state.config, idx), - tls, - process_id: state.process_id, - secret_key: state.secret_key, - }) - } - - fn poll_connecting_socket<'a>( - state: &'a mut RentToOwn<'a, ConnectingSocket>, - ) -> Poll, Error> { - let socket = try_ready!(state.future.poll()); - let state = state.take(); - - transition!(Canceling { - future: CancelQueryRawFuture::new( - socket, - state.mode, - state.tls, - state.process_id, - state.secret_key - ), - }) - } - - fn poll_canceling<'a>( - state: &'a mut RentToOwn<'a, Canceling>, - ) -> Poll { - try_ready!(state.future.poll()); - transition!(Finished(())) - } -} - -impl CancelQueryFuture -where - T: MakeTlsConnect, -{ - pub fn new( - tls: T, - idx: Option, - config: Config, - process_id: i32, - secret_key: i32, - ) -> CancelQueryFuture { - CancelQuery::start(tls, idx, config, process_id, secret_key) - } -} diff --git a/tokio-postgres/src/proto/cancel_query_raw.rs b/tokio-postgres/src/proto/cancel_query_raw.rs deleted file mode 100644 index 3580b3580..000000000 --- a/tokio-postgres/src/proto/cancel_query_raw.rs +++ /dev/null @@ -1,86 +0,0 @@ -use futures::{try_ready, Future, Poll}; -use postgres_protocol::message::frontend; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use tokio_io::io::{self, Flush, WriteAll}; -use tokio_io::{AsyncRead, AsyncWrite}; - -use crate::config::SslMode; -use crate::error::Error; -use crate::proto::{MaybeTlsStream, TlsFuture}; -use crate::TlsConnect; - -#[derive(StateMachineFuture)] -pub enum CancelQueryRaw -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - #[state_machine_future(start, transitions(SendingCancel))] - Start { - future: TlsFuture, - process_id: i32, - secret_key: i32, - }, - #[state_machine_future(transitions(FlushingCancel))] 
- SendingCancel { - future: WriteAll, Vec>, - }, - #[state_machine_future(transitions(Finished))] - FlushingCancel { - future: Flush>, - }, - #[state_machine_future(ready)] - Finished(()), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollCancelQueryRaw for CancelQueryRaw -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let (stream, _) = try_ready!(state.future.poll()); - - let mut buf = vec![]; - frontend::cancel_request(state.process_id, state.secret_key, &mut buf); - - transition!(SendingCancel { - future: io::write_all(stream, buf), - }) - } - - fn poll_sending_cancel<'a>( - state: &'a mut RentToOwn<'a, SendingCancel>, - ) -> Poll, Error> { - let (stream, _) = try_ready_closed!(state.future.poll()); - - transition!(FlushingCancel { - future: io::flush(stream), - }) - } - - fn poll_flushing_cancel<'a>( - state: &'a mut RentToOwn<'a, FlushingCancel>, - ) -> Poll { - try_ready_closed!(state.future.poll()); - transition!(Finished(())) - } -} - -impl CancelQueryRawFuture -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - pub fn new( - stream: S, - mode: SslMode, - tls: T, - process_id: i32, - secret_key: i32, - ) -> CancelQueryRawFuture { - CancelQueryRaw::start(TlsFuture::new(stream, mode, tls), process_id, secret_key) - } -} diff --git a/tokio-postgres/src/proto/client.rs b/tokio-postgres/src/proto/client.rs deleted file mode 100644 index 46184bb9d..000000000 --- a/tokio-postgres/src/proto/client.rs +++ /dev/null @@ -1,376 +0,0 @@ -use antidote::Mutex; -use bytes::IntoBuf; -use futures::sync::mpsc; -use futures::{AsyncSink, Poll, Sink, Stream}; -use postgres_protocol; -use postgres_protocol::message::frontend; -use std::collections::HashMap; -use std::error::Error as StdError; -use std::sync::{Arc, Weak}; -use tokio_io::{AsyncRead, AsyncWrite}; - -use crate::proto::bind::BindFuture; -use crate::proto::codec::FrontendMessage; -use crate::proto::connection::{Request, RequestMessages}; -use crate::proto::copy_in::{CopyInFuture, CopyInReceiver, CopyMessage}; -use crate::proto::copy_out::CopyOutStream; -use crate::proto::execute::ExecuteFuture; -use crate::proto::idle::{IdleGuard, IdleState}; -use crate::proto::portal::Portal; -use crate::proto::prepare::PrepareFuture; -use crate::proto::query::QueryStream; -use crate::proto::responses::{self, Responses}; -use crate::proto::simple_query::SimpleQueryStream; -use crate::proto::statement::Statement; -#[cfg(feature = "runtime")] -use crate::proto::CancelQueryFuture; -use crate::proto::CancelQueryRawFuture; -use crate::types::{IsNull, Oid, ToSql, Type}; -use crate::{Config, Error, TlsConnect}; -#[cfg(feature = "runtime")] -use crate::{MakeTlsConnect, Socket}; - -pub struct PendingRequest(Result<(RequestMessages, IdleGuard), Error>); - -pub struct WeakClient(Weak); - -impl WeakClient { - pub fn upgrade(&self) -> Option { - self.0.upgrade().map(Client) - } -} - -struct State { - types: HashMap, - typeinfo_query: Option, - typeinfo_enum_query: Option, - typeinfo_composite_query: Option, -} - -struct Inner { - state: Mutex, - idle: IdleState, - sender: mpsc::UnboundedSender, - process_id: i32, - secret_key: i32, - #[cfg_attr(not(feature = "runtime"), allow(dead_code))] - config: Config, - #[cfg_attr(not(feature = "runtime"), allow(dead_code))] - idx: Option, -} - -#[derive(Clone)] -pub struct Client(Arc); - -impl Client { - pub fn new( - sender: mpsc::UnboundedSender, - process_id: i32, - secret_key: i32, - config: Config, - idx: Option, - ) -> 
Client { - Client(Arc::new(Inner { - state: Mutex::new(State { - types: HashMap::new(), - typeinfo_query: None, - typeinfo_enum_query: None, - typeinfo_composite_query: None, - }), - idle: IdleState::new(), - sender, - process_id, - secret_key, - config, - idx, - })) - } - - pub fn is_closed(&self) -> bool { - self.0.sender.is_closed() - } - - pub fn poll_idle(&self) -> Poll<(), Error> { - self.0.idle.poll_idle() - } - - pub fn downgrade(&self) -> WeakClient { - WeakClient(Arc::downgrade(&self.0)) - } - - pub fn cached_type(&self, oid: Oid) -> Option { - self.0.state.lock().types.get(&oid).cloned() - } - - pub fn cache_type(&self, ty: &Type) { - self.0.state.lock().types.insert(ty.oid(), ty.clone()); - } - - pub fn typeinfo_query(&self) -> Option { - self.0.state.lock().typeinfo_query.clone() - } - - pub fn set_typeinfo_query(&self, statement: &Statement) { - self.0.state.lock().typeinfo_query = Some(statement.clone()); - } - - pub fn typeinfo_enum_query(&self) -> Option { - self.0.state.lock().typeinfo_enum_query.clone() - } - - pub fn set_typeinfo_enum_query(&self, statement: &Statement) { - self.0.state.lock().typeinfo_enum_query = Some(statement.clone()); - } - - pub fn typeinfo_composite_query(&self) -> Option { - self.0.state.lock().typeinfo_composite_query.clone() - } - - pub fn set_typeinfo_composite_query(&self, statement: &Statement) { - self.0.state.lock().typeinfo_composite_query = Some(statement.clone()); - } - - pub fn send(&self, request: PendingRequest) -> Result { - let (messages, idle) = request.0?; - let (sender, receiver) = responses::channel(); - self.0 - .sender - .unbounded_send(Request { - messages, - sender, - idle: Some(idle), - }) - .map(|_| receiver) - .map_err(|_| Error::closed()) - } - - pub fn simple_query(&self, query: &str) -> SimpleQueryStream { - let pending = self.pending(|buf| { - frontend::query(query, buf).map_err(Error::parse)?; - Ok(()) - }); - - SimpleQueryStream::new(self.clone(), pending) - } - - pub fn prepare(&self, name: String, query: &str, param_types: &[Type]) -> PrepareFuture { - let pending = self.pending(|buf| { - frontend::parse(&name, query, param_types.iter().map(Type::oid), buf) - .map_err(Error::parse)?; - frontend::describe(b'S', &name, buf).map_err(Error::parse)?; - frontend::sync(buf); - Ok(()) - }); - - PrepareFuture::new(self.clone(), pending, name) - } - - pub fn execute<'a, I>(&self, statement: &Statement, params: I) -> ExecuteFuture - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - let pending = PendingRequest( - self.excecute_message(statement, params) - .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), - ); - ExecuteFuture::new(self.clone(), pending, statement.clone()) - } - - pub fn query<'a, I>(&self, statement: &Statement, params: I) -> QueryStream - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - let pending = PendingRequest( - self.excecute_message(statement, params) - .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), - ); - QueryStream::new(self.clone(), pending, statement.clone()) - } - - pub fn bind<'a, I>(&self, statement: &Statement, name: String, params: I) -> BindFuture - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - let mut buf = self.bind_message(statement, &name, params); - if let Ok(ref mut buf) = buf { - frontend::sync(buf); - } - let pending = PendingRequest(buf.map(|m| { - ( - RequestMessages::Single(FrontendMessage::Raw(m)), - self.0.idle.guard(), - ) - })); - BindFuture::new(self.clone(), pending, name, statement.clone()) - } - 
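
The prepare path in this client boils down to three frontend messages queued as a single request: Parse registers the statement under a generated name, Describe (variant 'S') asks for its parameter and column metadata, and Sync ends the implicit transaction so the server answers with ReadyForQuery. A small sketch using the same postgres_protocol helpers called above; the free function and the exact helper signatures are assumptions on my part, inferred from that usage.

use postgres_protocol::message::frontend;

// `Oid` is a plain u32 in postgres_protocol.
fn prepare_request(name: &str, query: &str, param_oids: &[u32]) -> std::io::Result<Vec<u8>> {
    let mut buf = vec![];
    // Parse: create a prepared statement under `name`, optionally pinning parameter OIDs.
    frontend::parse(name, query, param_oids.iter().copied(), &mut buf)?;
    // Describe ('S' = statement): request its parameter and row descriptions.
    frontend::describe(b'S', name, &mut buf)?;
    // Sync: close the implicit transaction so the server replies with ReadyForQuery.
    frontend::sync(&mut buf);
    Ok(buf)
}

fn main() -> std::io::Result<()> {
    let buf = prepare_request("s0", "SELECT $1::INT4", &[])?;
    println!("prepare request is {} bytes", buf.len());
    Ok(())
}
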
- pub fn query_portal(&self, portal: &Portal, rows: i32) -> QueryStream { - let pending = self.pending(|buf| { - frontend::execute(portal.name(), rows, buf).map_err(Error::parse)?; - frontend::sync(buf); - Ok(()) - }); - QueryStream::new(self.clone(), pending, portal.clone()) - } - - pub fn copy_in<'a, S, I>(&self, statement: &Statement, params: I, stream: S) -> CopyInFuture - where - S: Stream, - S::Item: IntoBuf, - ::Buf: 'static + Send, - S::Error: Into>, - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - let (mut sender, receiver) = mpsc::channel(1); - let pending = PendingRequest(self.excecute_message(statement, params).map(|data| { - match sender.start_send(CopyMessage::Message(data)) { - Ok(AsyncSink::Ready) => {} - _ => unreachable!("channel should have capacity"), - } - ( - RequestMessages::CopyIn { - receiver: CopyInReceiver::new(receiver), - pending_message: None, - }, - self.0.idle.guard(), - ) - })); - CopyInFuture::new(self.clone(), pending, statement.clone(), stream, sender) - } - - pub fn copy_out<'a, I>(&self, statement: &Statement, params: I) -> CopyOutStream - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - let pending = PendingRequest( - self.excecute_message(statement, params) - .map(|m| (RequestMessages::Single(m), self.0.idle.guard())), - ); - CopyOutStream::new(self.clone(), pending, statement.clone()) - } - - pub fn close_statement(&self, name: &str) { - self.close(b'S', name) - } - - pub fn close_portal(&self, name: &str) { - self.close(b'P', name) - } - - #[cfg(feature = "runtime")] - pub fn cancel_query(&self, make_tls_mode: T) -> CancelQueryFuture - where - T: MakeTlsConnect, - { - CancelQueryFuture::new( - make_tls_mode, - self.0.idx, - self.0.config.clone(), - self.0.process_id, - self.0.secret_key, - ) - } - - pub fn cancel_query_raw(&self, stream: S, mode: T) -> CancelQueryRawFuture - where - S: AsyncRead + AsyncWrite, - T: TlsConnect, - { - CancelQueryRawFuture::new( - stream, - self.0.config.0.ssl_mode, - mode, - self.0.process_id, - self.0.secret_key, - ) - } - - fn close(&self, ty: u8, name: &str) { - let mut buf = vec![]; - frontend::close(ty, name, &mut buf).expect("statement name not valid"); - frontend::sync(&mut buf); - let (sender, _) = mpsc::channel(0); - let _ = self.0.sender.unbounded_send(Request { - messages: RequestMessages::Single(FrontendMessage::Raw(buf)), - sender, - idle: None, - }); - } - - fn bind_message<'a, I>( - &self, - statement: &Statement, - name: &str, - params: I, - ) -> Result, Error> - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - let params = params.into_iter(); - - assert!( - statement.params().len() == params.len(), - "expected {} parameters but got {}", - statement.params().len(), - params.len() - ); - - let mut buf = vec![]; - let mut error_idx = 0; - let r = frontend::bind( - name, - statement.name(), - Some(1), - params.zip(statement.params()).enumerate(), - |(idx, (param, ty)), buf| match param.to_sql_checked(ty, buf) { - Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), - Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), - Err(e) => { - error_idx = idx; - Err(e) - } - }, - Some(1), - &mut buf, - ); - match r { - Ok(()) => Ok(buf), - Err(frontend::BindError::Conversion(e)) => Err(Error::to_sql(e, error_idx)), - Err(frontend::BindError::Serialization(e)) => Err(Error::encode(e)), - } - } - - fn excecute_message<'a, I>( - &self, - statement: &Statement, - params: I, - ) -> Result - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, - { - let mut buf = 
self.bind_message(statement, "", params)?; - frontend::execute("", 0, &mut buf).map_err(Error::parse)?; - frontend::sync(&mut buf); - Ok(FrontendMessage::Raw(buf)) - } - - fn pending(&self, messages: F) -> PendingRequest - where - F: FnOnce(&mut Vec) -> Result<(), Error>, - { - let mut buf = vec![]; - PendingRequest(messages(&mut buf).map(|()| { - ( - RequestMessages::Single(FrontendMessage::Raw(buf)), - self.0.idle.guard(), - ) - })) - } -} diff --git a/tokio-postgres/src/proto/codec.rs b/tokio-postgres/src/proto/codec.rs deleted file mode 100644 index 4ebebd479..000000000 --- a/tokio-postgres/src/proto/codec.rs +++ /dev/null @@ -1,99 +0,0 @@ -use bytes::{Buf, BytesMut}; -use fallible_iterator::FallibleIterator; -use postgres_protocol::message::backend; -use postgres_protocol::message::frontend::CopyData; -use std::io; -use tokio_codec::{Decoder, Encoder}; - -pub enum FrontendMessage { - Raw(Vec), - CopyData(CopyData>), -} - -pub enum BackendMessage { - Normal { - messages: BackendMessages, - request_complete: bool, - }, - Async(backend::Message), -} - -pub struct BackendMessages(BytesMut); - -impl BackendMessages { - pub fn empty() -> BackendMessages { - BackendMessages(BytesMut::new()) - } -} - -impl FallibleIterator for BackendMessages { - type Item = backend::Message; - type Error = io::Error; - - fn next(&mut self) -> io::Result> { - backend::Message::parse(&mut self.0) - } -} - -pub struct PostgresCodec; - -impl Encoder for PostgresCodec { - type Item = FrontendMessage; - type Error = io::Error; - - fn encode(&mut self, item: FrontendMessage, dst: &mut BytesMut) -> Result<(), io::Error> { - match item { - FrontendMessage::Raw(buf) => dst.extend_from_slice(&buf), - FrontendMessage::CopyData(data) => data.write(dst), - } - - Ok(()) - } -} - -impl Decoder for PostgresCodec { - type Item = BackendMessage; - type Error = io::Error; - - fn decode(&mut self, src: &mut BytesMut) -> Result, io::Error> { - let mut idx = 0; - let mut request_complete = false; - - while let Some(header) = backend::Header::parse(&src[idx..])? 
{ - let len = header.len() as usize + 1; - if src[idx..].len() < len { - break; - } - - match header.tag() { - backend::NOTICE_RESPONSE_TAG - | backend::NOTIFICATION_RESPONSE_TAG - | backend::PARAMETER_STATUS_TAG => { - if idx == 0 { - let message = backend::Message::parse(src)?.unwrap(); - return Ok(Some(BackendMessage::Async(message))); - } else { - break; - } - } - _ => {} - } - - idx += len; - - if header.tag() == backend::READY_FOR_QUERY_TAG { - request_complete = true; - break; - } - } - - if idx == 0 { - Ok(None) - } else { - Ok(Some(BackendMessage::Normal { - messages: BackendMessages(src.split_to(idx)), - request_complete, - })) - } - } -} diff --git a/tokio-postgres/src/proto/connect.rs b/tokio-postgres/src/proto/connect.rs deleted file mode 100644 index 510c96be6..000000000 --- a/tokio-postgres/src/proto/connect.rs +++ /dev/null @@ -1,107 +0,0 @@ -use futures::{Async, Future, Poll}; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; - -use crate::config::Host; -use crate::proto::{Client, ConnectOnceFuture, Connection, MaybeTlsStream}; -use crate::{Config, Error, MakeTlsConnect, Socket}; - -#[derive(StateMachineFuture)] -pub enum Connect -where - T: MakeTlsConnect, -{ - #[state_machine_future(start, transitions(Connecting))] - Start { - tls: T, - config: Result, - }, - #[state_machine_future(transitions(Finished))] - Connecting { - future: ConnectOnceFuture, - idx: usize, - tls: T, - config: Config, - }, - #[state_machine_future(ready)] - Finished((Client, Connection>)), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollConnect for Connect -where - T: MakeTlsConnect, -{ - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let mut state = state.take(); - - let config = state.config?; - - if config.0.host.is_empty() { - return Err(Error::config("host missing".into())); - } - - if config.0.port.len() > 1 && config.0.port.len() != config.0.host.len() { - return Err(Error::config("invalid number of ports".into())); - } - - let hostname = match &config.0.host[0] { - Host::Tcp(host) => &**host, - // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter - #[cfg(unix)] - Host::Unix(_) => "", - }; - let tls = state - .tls - .make_tls_connect(hostname) - .map_err(|e| Error::tls(e.into()))?; - - transition!(Connecting { - future: ConnectOnceFuture::new(0, tls, config.clone()), - idx: 0, - tls: state.tls, - config, - }) - } - - fn poll_connecting<'a>( - state: &'a mut RentToOwn<'a, Connecting>, - ) -> Poll, Error> { - loop { - match state.future.poll() { - Ok(Async::Ready(r)) => transition!(Finished(r)), - Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(e) => { - let state = &mut **state; - state.idx += 1; - - let host = match state.config.0.host.get(state.idx) { - Some(host) => host, - None => return Err(e), - }; - - let hostname = match host { - Host::Tcp(host) => &**host, - #[cfg(unix)] - Host::Unix(_) => "", - }; - let tls = state - .tls - .make_tls_connect(hostname) - .map_err(|e| Error::tls(e.into()))?; - - state.future = ConnectOnceFuture::new(state.idx, tls, state.config.clone()); - } - } - } - } -} - -impl ConnectFuture -where - T: MakeTlsConnect, -{ - pub fn new(tls: T, config: Result) -> ConnectFuture { - Connect::start(tls, config) - } -} diff --git a/tokio-postgres/src/proto/connect_once.rs b/tokio-postgres/src/proto/connect_once.rs deleted file mode 100644 index c42ebb1d1..000000000 --- a/tokio-postgres/src/proto/connect_once.rs +++ /dev/null @@ -1,121 +0,0 @@ 
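
One detail worth spelling out from the codec's decode above: every backend message is a one-byte tag followed by a big-endian Int32 length that counts itself but not the tag, so a complete frame occupies header.len() + 1 bytes of the buffer, and asynchronous messages (notices, notifications, parameter status) are surfaced separately so they are never attributed to a pending request. A std-only illustration of the framing rule (the helper below is hypothetical, not the crate's):

// Returns the message tag and the total number of bytes the frame occupies, or None
// if the buffer does not yet hold the 5-byte header; the caller still has to check
// that the body itself is fully buffered.
fn frame_len(buf: &[u8]) -> Option<(u8, usize)> {
    if buf.len() < 5 {
        return None;
    }
    let tag = buf[0];
    let len = i32::from_be_bytes([buf[1], buf[2], buf[3], buf[4]]) as usize;
    Some((tag, len + 1))
}

fn main() {
    // 'Z' (ReadyForQuery): length 5 plus one status byte ('I' = idle).
    let bytes = [b'Z', 0, 0, 0, 5, b'I'];
    assert_eq!(frame_len(&bytes), Some((b'Z', 6)));
    println!("ok");
}
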
-#![allow(clippy::large_enum_variant)] - -use futures::{try_ready, Async, Future, Poll, Stream}; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::io; - -use crate::config::TargetSessionAttrs; -use crate::proto::{ - Client, ConnectRawFuture, ConnectSocketFuture, Connection, MaybeTlsStream, SimpleQueryStream, -}; -use crate::{Config, Error, SimpleQueryMessage, Socket, TlsConnect}; - -#[derive(StateMachineFuture)] -pub enum ConnectOnce -where - T: TlsConnect, -{ - #[state_machine_future(start, transitions(ConnectingSocket))] - Start { idx: usize, tls: T, config: Config }, - #[state_machine_future(transitions(ConnectingRaw))] - ConnectingSocket { - future: ConnectSocketFuture, - idx: usize, - tls: T, - config: Config, - }, - #[state_machine_future(transitions(CheckingSessionAttrs, Finished))] - ConnectingRaw { - future: ConnectRawFuture, - target_session_attrs: TargetSessionAttrs, - }, - #[state_machine_future(transitions(Finished))] - CheckingSessionAttrs { - stream: SimpleQueryStream, - client: Client, - connection: Connection>, - }, - #[state_machine_future(ready)] - Finished((Client, Connection>)), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollConnectOnce for ConnectOnce -where - T: TlsConnect, -{ - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let state = state.take(); - - transition!(ConnectingSocket { - future: ConnectSocketFuture::new(state.config.clone(), state.idx), - idx: state.idx, - tls: state.tls, - config: state.config, - }) - } - - fn poll_connecting_socket<'a>( - state: &'a mut RentToOwn<'a, ConnectingSocket>, - ) -> Poll, Error> { - let socket = try_ready!(state.future.poll()); - let state = state.take(); - - transition!(ConnectingRaw { - target_session_attrs: state.config.0.target_session_attrs, - future: ConnectRawFuture::new(socket, state.tls, state.config, Some(state.idx)), - }) - } - - fn poll_connecting_raw<'a>( - state: &'a mut RentToOwn<'a, ConnectingRaw>, - ) -> Poll, Error> { - let (client, connection) = try_ready!(state.future.poll()); - - if let TargetSessionAttrs::ReadWrite = state.target_session_attrs { - transition!(CheckingSessionAttrs { - stream: client.simple_query("SHOW transaction_read_only"), - client, - connection, - }) - } else { - transition!(Finished((client, connection))) - } - } - - fn poll_checking_session_attrs<'a>( - state: &'a mut RentToOwn<'a, CheckingSessionAttrs>, - ) -> Poll, Error> { - loop { - if let Async::Ready(()) = state.connection.poll()? { - return Err(Error::closed()); - } - - match try_ready!(state.stream.poll()) { - Some(SimpleQueryMessage::Row(row)) => { - if row.try_get(0)? 
== Some("on") { - return Err(Error::connect(io::Error::new( - io::ErrorKind::PermissionDenied, - "database does not allow writes", - ))); - } else { - let state = state.take(); - transition!(Finished((state.client, state.connection))) - } - } - Some(_) => {} - None => return Err(Error::closed()), - } - } - } -} - -impl ConnectOnceFuture -where - T: TlsConnect, -{ - pub fn new(idx: usize, tls: T, config: Config) -> ConnectOnceFuture { - ConnectOnce::start(idx, tls, config) - } -} diff --git a/tokio-postgres/src/proto/connect_raw.rs b/tokio-postgres/src/proto/connect_raw.rs deleted file mode 100644 index 0cb0ec033..000000000 --- a/tokio-postgres/src/proto/connect_raw.rs +++ /dev/null @@ -1,442 +0,0 @@ -use fallible_iterator::FallibleIterator; -use futures::sync::mpsc; -use futures::{sink, Async, AsyncSink}; -use futures::{try_ready, Future, Poll, Sink, Stream}; -use postgres_protocol::authentication; -use postgres_protocol::authentication::sasl::{self, ScramSha256}; -use postgres_protocol::message::backend::Message; -use postgres_protocol::message::frontend; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::collections::HashMap; -use std::io; -use tokio_codec::Framed; -use tokio_io::{AsyncRead, AsyncWrite}; - -use crate::proto::codec::{BackendMessage, BackendMessages}; -use crate::proto::{Client, Connection, FrontendMessage, MaybeTlsStream, PostgresCodec, TlsFuture}; -use crate::tls::ChannelBinding; -use crate::{Config, Error, TlsConnect}; - -pub struct StartupStream { - inner: Framed, PostgresCodec>, - buf: BackendMessages, -} - -impl Sink for StartupStream -where - S: AsyncRead + AsyncWrite, - T: AsyncRead + AsyncWrite, -{ - type SinkItem = FrontendMessage; - type SinkError = io::Error; - - fn start_send(&mut self, item: FrontendMessage) -> io::Result> { - self.inner.start_send(item) - } - - fn poll_complete(&mut self) -> Poll<(), io::Error> { - self.inner.poll_complete() - } - - fn close(&mut self) -> Poll<(), io::Error> { - self.inner.close() - } -} - -impl Stream for StartupStream -where - S: AsyncRead + AsyncWrite, - T: AsyncRead + AsyncWrite, -{ - type Item = Message; - type Error = io::Error; - - fn poll(&mut self) -> Poll, io::Error> { - loop { - if let Some(message) = self.buf.next()? { - return Ok(Async::Ready(Some(message))); - } - - match try_ready!(self.inner.poll()) { - Some(BackendMessage::Async(message)) => return Ok(Async::Ready(Some(message))), - Some(BackendMessage::Normal { messages, .. 
}) => self.buf = messages, - None => return Ok(Async::Ready(None)), - } - } - } -} - -#[derive(StateMachineFuture)] -pub enum ConnectRaw -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - #[state_machine_future(start, transitions(SendingStartup))] - Start { - future: TlsFuture, - config: Config, - idx: Option, - }, - #[state_machine_future(transitions(ReadingAuth))] - SendingStartup { - future: sink::Send>, - config: Config, - idx: Option, - channel_binding: ChannelBinding, - }, - #[state_machine_future(transitions(ReadingInfo, SendingPassword, SendingSasl))] - ReadingAuth { - stream: StartupStream, - config: Config, - idx: Option, - channel_binding: ChannelBinding, - }, - #[state_machine_future(transitions(ReadingAuthCompletion))] - SendingPassword { - future: sink::Send>, - config: Config, - idx: Option, - }, - #[state_machine_future(transitions(ReadingSasl))] - SendingSasl { - future: sink::Send>, - scram: ScramSha256, - config: Config, - idx: Option, - }, - #[state_machine_future(transitions(SendingSasl, ReadingAuthCompletion))] - ReadingSasl { - stream: StartupStream, - scram: ScramSha256, - config: Config, - idx: Option, - }, - #[state_machine_future(transitions(ReadingInfo))] - ReadingAuthCompletion { - stream: StartupStream, - config: Config, - idx: Option, - }, - #[state_machine_future(transitions(Finished))] - ReadingInfo { - stream: StartupStream, - process_id: i32, - secret_key: i32, - parameters: HashMap, - config: Config, - idx: Option, - }, - #[state_machine_future(ready)] - Finished((Client, Connection>)), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollConnectRaw for ConnectRaw -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let (stream, channel_binding) = try_ready!(state.future.poll()); - let state = state.take(); - - let mut params = vec![("client_encoding", "UTF8"), ("timezone", "GMT")]; - if let Some(user) = &state.config.0.user { - params.push(("user", &**user)); - } - if let Some(dbname) = &state.config.0.dbname { - params.push(("database", &**dbname)); - } - if let Some(options) = &state.config.0.options { - params.push(("options", &**options)); - } - if let Some(application_name) = &state.config.0.application_name { - params.push(("application_name", &**application_name)); - } - - let mut buf = vec![]; - frontend::startup_message(params, &mut buf).map_err(Error::encode)?; - - let stream = Framed::new(stream, PostgresCodec); - let stream = StartupStream { - inner: stream, - buf: BackendMessages::empty(), - }; - - transition!(SendingStartup { - future: stream.send(FrontendMessage::Raw(buf)), - config: state.config, - idx: state.idx, - channel_binding, - }) - } - - fn poll_sending_startup<'a>( - state: &'a mut RentToOwn<'a, SendingStartup>, - ) -> Poll, Error> { - let stream = try_ready!(state.future.poll().map_err(Error::io)); - let state = state.take(); - transition!(ReadingAuth { - stream, - config: state.config, - idx: state.idx, - channel_binding: state.channel_binding, - }) - } - - fn poll_reading_auth<'a>( - state: &'a mut RentToOwn<'a, ReadingAuth>, - ) -> Poll, Error> { - let message = try_ready!(state.stream.poll().map_err(Error::io)); - let state = state.take(); - - match message { - Some(Message::AuthenticationOk) => transition!(ReadingInfo { - stream: state.stream, - process_id: 0, - secret_key: 0, - parameters: HashMap::new(), - config: state.config, - idx: state.idx, - }), - Some(Message::AuthenticationCleartextPassword) => { - let pass = state 
- .config - .0 - .password - .as_ref() - .ok_or_else(|| Error::config("password missing".into()))?; - let mut buf = vec![]; - frontend::password_message(pass, &mut buf).map_err(Error::encode)?; - transition!(SendingPassword { - future: state.stream.send(FrontendMessage::Raw(buf)), - config: state.config, - idx: state.idx, - }) - } - Some(Message::AuthenticationMd5Password(body)) => { - let user = state - .config - .0 - .user - .as_ref() - .ok_or_else(|| Error::config("user missing".into()))?; - let pass = state - .config - .0 - .password - .as_ref() - .ok_or_else(|| Error::config("password missing".into()))?; - let output = authentication::md5_hash(user.as_bytes(), pass, body.salt()); - let mut buf = vec![]; - frontend::password_message(output.as_bytes(), &mut buf).map_err(Error::encode)?; - transition!(SendingPassword { - future: state.stream.send(FrontendMessage::Raw(buf)), - config: state.config, - idx: state.idx, - }) - } - Some(Message::AuthenticationSasl(body)) => { - let pass = state - .config - .0 - .password - .as_ref() - .ok_or_else(|| Error::config("password missing".into()))?; - - let mut has_scram = false; - let mut has_scram_plus = false; - let mut mechanisms = body.mechanisms(); - while let Some(mechanism) = mechanisms.next().map_err(Error::parse)? { - match mechanism { - sasl::SCRAM_SHA_256 => has_scram = true, - sasl::SCRAM_SHA_256_PLUS => has_scram_plus = true, - _ => {} - } - } - - let channel_binding = if let Some(tls_server_end_point) = - state.channel_binding.tls_server_end_point - { - Some(sasl::ChannelBinding::tls_server_end_point( - tls_server_end_point, - )) - } else { - None - }; - - let (channel_binding, mechanism) = if has_scram_plus { - match channel_binding { - Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS), - None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), - } - } else if has_scram { - match channel_binding { - Some(_) => (sasl::ChannelBinding::unrequested(), sasl::SCRAM_SHA_256), - None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), - } - } else { - return Err(Error::authentication( - "unsupported authentication method".into(), - )); - }; - - let scram = ScramSha256::new(pass, channel_binding); - - let mut buf = vec![]; - frontend::sasl_initial_response(mechanism, scram.message(), &mut buf) - .map_err(Error::encode)?; - - transition!(SendingSasl { - future: state.stream.send(FrontendMessage::Raw(buf)), - scram, - config: state.config, - idx: state.idx, - }) - } - Some(Message::AuthenticationKerberosV5) - | Some(Message::AuthenticationScmCredential) - | Some(Message::AuthenticationGss) - | Some(Message::AuthenticationSspi) => Err(Error::authentication( - "unsupported authentication method".into(), - )), - Some(Message::ErrorResponse(body)) => Err(Error::db(body)), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } - - fn poll_sending_password<'a>( - state: &'a mut RentToOwn<'a, SendingPassword>, - ) -> Poll, Error> { - let stream = try_ready!(state.future.poll().map_err(Error::io)); - let state = state.take(); - transition!(ReadingAuthCompletion { - stream, - config: state.config, - idx: state.idx, - }) - } - - fn poll_sending_sasl<'a>( - state: &'a mut RentToOwn<'a, SendingSasl>, - ) -> Poll, Error> { - let stream = try_ready!(state.future.poll().map_err(Error::io)); - let state = state.take(); - transition!(ReadingSasl { - stream, - scram: state.scram, - config: state.config, - idx: state.idx, - }) - } - - fn poll_reading_sasl<'a>( - state: &'a mut RentToOwn<'a, 
ReadingSasl>, - ) -> Poll, Error> { - let message = try_ready!(state.stream.poll().map_err(Error::io)); - let mut state = state.take(); - - match message { - Some(Message::AuthenticationSaslContinue(body)) => { - state - .scram - .update(body.data()) - .map_err(|e| Error::authentication(Box::new(e)))?; - let mut buf = vec![]; - frontend::sasl_response(state.scram.message(), &mut buf).map_err(Error::encode)?; - transition!(SendingSasl { - future: state.stream.send(FrontendMessage::Raw(buf)), - scram: state.scram, - config: state.config, - idx: state.idx, - }) - } - Some(Message::AuthenticationSaslFinal(body)) => { - state - .scram - .finish(body.data()) - .map_err(|e| Error::authentication(Box::new(e)))?; - transition!(ReadingAuthCompletion { - stream: state.stream, - config: state.config, - idx: state.idx, - }) - } - Some(Message::ErrorResponse(body)) => Err(Error::db(body)), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } - - fn poll_reading_auth_completion<'a>( - state: &'a mut RentToOwn<'a, ReadingAuthCompletion>, - ) -> Poll, Error> { - let message = try_ready!(state.stream.poll().map_err(Error::io)); - let state = state.take(); - - match message { - Some(Message::AuthenticationOk) => transition!(ReadingInfo { - stream: state.stream, - process_id: 0, - secret_key: 0, - parameters: HashMap::new(), - config: state.config, - idx: state.idx, - }), - Some(Message::ErrorResponse(body)) => Err(Error::db(body)), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } - - fn poll_reading_info<'a>( - state: &'a mut RentToOwn<'a, ReadingInfo>, - ) -> Poll, Error> { - loop { - let message = try_ready!(state.stream.poll().map_err(Error::io)); - match message { - Some(Message::BackendKeyData(body)) => { - state.process_id = body.process_id(); - state.secret_key = body.secret_key(); - } - Some(Message::ParameterStatus(body)) => { - state.parameters.insert( - body.name().map_err(Error::parse)?.to_string(), - body.value().map_err(Error::parse)?.to_string(), - ); - } - Some(Message::ReadyForQuery(_)) => { - let state = state.take(); - let (sender, receiver) = mpsc::unbounded(); - let client = Client::new( - sender, - state.process_id, - state.secret_key, - state.config, - state.idx, - ); - let connection = - Connection::new(state.stream.inner, state.parameters, receiver); - transition!(Finished((client, connection))) - } - Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), - Some(Message::NoticeResponse(_)) => {} - Some(_) => return Err(Error::unexpected_message()), - None => return Err(Error::closed()), - } - } - } -} - -impl ConnectRawFuture -where - S: AsyncRead + AsyncWrite, - T: TlsConnect, -{ - pub fn new(stream: S, tls: T, config: Config, idx: Option) -> ConnectRawFuture { - ConnectRaw::start(TlsFuture::new(stream, config.0.ssl_mode, tls), config, idx) - } -} diff --git a/tokio-postgres/src/proto/connect_socket.rs b/tokio-postgres/src/proto/connect_socket.rs deleted file mode 100644 index 7b1f92056..000000000 --- a/tokio-postgres/src/proto/connect_socket.rs +++ /dev/null @@ -1,198 +0,0 @@ -use futures::{try_ready, Async, Future, Poll}; -use futures_cpupool::{CpuFuture, CpuPool}; -use lazy_static::lazy_static; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::io; -use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; -use std::time::Instant; -use std::vec; -use tokio_tcp::TcpStream; -use tokio_timer::Delay; -#[cfg(unix)] -use tokio_uds::UnixStream; - -use crate::config::Host; -use 
crate::{Config, Error, Socket}; - -lazy_static! { - static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new() - .name_prefix("postgres-dns-") - .pool_size(2) - .create(); -} - -#[derive(StateMachineFuture)] -pub enum ConnectSocket { - #[state_machine_future(start)] - #[cfg_attr( - unix, - state_machine_future(transitions(ConnectingUnix, ConnectingTcp, ResolvingDns)) - )] - #[cfg_attr( - not(unix), - state_machine_future(transitions(ConnectingTcp, ResolvingDns)) - )] - Start { config: Config, idx: usize }, - #[cfg(unix)] - #[state_machine_future(transitions(Finished))] - ConnectingUnix { - future: tokio_uds::ConnectFuture, - timeout: Option, - }, - #[state_machine_future(transitions(ConnectingTcp))] - ResolvingDns { - future: CpuFuture, io::Error>, - config: Config, - }, - #[state_machine_future(transitions(Finished))] - ConnectingTcp { - future: tokio_tcp::ConnectFuture, - timeout: Option, - addrs: vec::IntoIter, - config: Config, - }, - #[state_machine_future(ready)] - Finished(Socket), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollConnectSocket for ConnectSocket { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); - - let port = *state - .config - .0 - .port - .get(state.idx) - .or_else(|| state.config.0.port.get(0)) - .unwrap_or(&5432); - - match &state.config.0.host[state.idx] { - Host::Tcp(host) => match host.parse::() { - Ok(addr) => transition!(ConnectingTcp { - future: TcpStream::connect(&SocketAddr::new(addr, port)), - timeout: state - .config - .0 - .connect_timeout - .map(|d| Delay::new(Instant::now() + d)), - addrs: vec![].into_iter(), - config: state.config, - }), - Err(_) => transition!(ResolvingDns { - future: DNS_POOL.spawn_fn({ - let host = host.clone(); - move || (&*host, port).to_socket_addrs() - }), - config: state.config, - }), - }, - #[cfg(unix)] - Host::Unix(host) => { - let path = host.join(format!(".s.PGSQL.{}", port)); - let timeout = state - .config - .0 - .connect_timeout - .map(|d| Delay::new(Instant::now() + d)); - transition!(ConnectingUnix { - future: UnixStream::connect(path), - timeout, - }) - } - } - } - - #[cfg(unix)] - fn poll_connecting_unix<'a>( - state: &'a mut RentToOwn<'a, ConnectingUnix>, - ) -> Poll { - if let Some(timeout) = &mut state.timeout { - match timeout.poll() { - Ok(Async::Ready(())) => { - return Err(Error::connect(io::Error::from(io::ErrorKind::TimedOut))); - } - Ok(Async::NotReady) => {} - Err(e) => return Err(Error::connect(io::Error::new(io::ErrorKind::Other, e))), - } - } - let socket = try_ready!(state.future.poll().map_err(Error::connect)); - - transition!(Finished(Socket::new_unix(socket))) - } - - fn poll_resolving_dns<'a>( - state: &'a mut RentToOwn<'a, ResolvingDns>, - ) -> Poll { - let mut addrs = try_ready!(state.future.poll().map_err(Error::connect)); - let state = state.take(); - - let addr = match addrs.next() { - Some(addr) => addr, - None => { - return Err(Error::connect(io::Error::new( - io::ErrorKind::InvalidData, - "resolved 0 addresses", - ))); - } - }; - - let timeout = state - .config - .0 - .connect_timeout - .map(|d| Delay::new(Instant::now() + d)); - - transition!(ConnectingTcp { - future: TcpStream::connect(&addr), - addrs, - timeout: timeout, - config: state.config, - }) - } - - fn poll_connecting_tcp<'a>( - state: &'a mut RentToOwn<'a, ConnectingTcp>, - ) -> Poll { - let stream = loop { - let error = match state.future.poll() { - Ok(Async::Ready(stream)) => break stream, - Ok(Async::NotReady) => match &mut state.timeout { - Some(timeout) 
=> { - try_ready!(timeout - .poll() - .map_err(|e| Error::connect(io::Error::new(io::ErrorKind::Other, e)))); - io::Error::from(io::ErrorKind::TimedOut) - } - None => return Ok(Async::NotReady), - }, - Err(e) => e, - }; - - let addr = state.addrs.next().ok_or_else(|| Error::connect(error))?; - state.future = TcpStream::connect(&addr); - state.timeout = state - .config - .0 - .connect_timeout - .map(|d| Delay::new(Instant::now() + d)); - }; - - stream.set_nodelay(true).map_err(Error::connect)?; - if state.config.0.keepalives { - stream - .set_keepalive(Some(state.config.0.keepalives_idle)) - .map_err(Error::connect)?; - } - - transition!(Finished(Socket::new_tcp(stream))); - } -} - -impl ConnectSocketFuture { - pub fn new(config: Config, idx: usize) -> ConnectSocketFuture { - ConnectSocket::start(config, idx) - } -} diff --git a/tokio-postgres/src/proto/connection.rs b/tokio-postgres/src/proto/connection.rs deleted file mode 100644 index 222fd16ea..000000000 --- a/tokio-postgres/src/proto/connection.rs +++ /dev/null @@ -1,321 +0,0 @@ -use fallible_iterator::FallibleIterator; -use futures::sync::mpsc; -use futures::{try_ready, Async, AsyncSink, Future, Poll, Sink, Stream}; -use log::trace; -use postgres_protocol::message::backend::Message; -use postgres_protocol::message::frontend; -use std::collections::{HashMap, VecDeque}; -use std::io; -use tokio_codec::Framed; -use tokio_io::{AsyncRead, AsyncWrite}; - -use crate::proto::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; -use crate::proto::copy_in::CopyInReceiver; -use crate::proto::idle::IdleGuard; -use crate::{AsyncMessage, Notification}; -use crate::{DbError, Error}; - -pub enum RequestMessages { - Single(FrontendMessage), - CopyIn { - receiver: CopyInReceiver, - pending_message: Option, - }, -} - -pub struct Request { - pub messages: RequestMessages, - pub sender: mpsc::Sender, - pub idle: Option, -} - -struct Response { - sender: mpsc::Sender, - _idle: Option, -} - -#[derive(PartialEq, Debug)] -enum State { - Active, - Terminating, - Closing, -} - -pub struct Connection { - stream: Framed, - parameters: HashMap, - receiver: mpsc::UnboundedReceiver, - pending_request: Option, - pending_response: Option, - responses: VecDeque, - state: State, -} - -impl Connection -where - S: AsyncRead + AsyncWrite, -{ - pub fn new( - stream: Framed, - parameters: HashMap, - receiver: mpsc::UnboundedReceiver, - ) -> Connection { - Connection { - stream, - parameters, - receiver, - pending_request: None, - pending_response: None, - responses: VecDeque::new(), - state: State::Active, - } - } - - pub fn parameter(&self, name: &str) -> Option<&str> { - self.parameters.get(name).map(|s| &**s) - } - - fn poll_response(&mut self) -> Poll, io::Error> { - if let Some(message) = self.pending_response.take() { - trace!("retrying pending response"); - return Ok(Async::Ready(Some(message))); - } - - self.stream.poll() - } - - fn poll_read(&mut self) -> Result, Error> { - if self.state != State::Active { - trace!("poll_read: done"); - return Ok(None); - } - - loop { - let message = match self.poll_response().map_err(Error::io)? 
{ - Async::Ready(Some(message)) => message, - Async::Ready(None) => { - return Err(Error::closed()); - } - Async::NotReady => { - trace!("poll_read: waiting on response"); - return Ok(None); - } - }; - - let (mut messages, request_complete) = match message { - BackendMessage::Async(Message::NoticeResponse(body)) => { - let error = DbError::parse(&mut body.fields()).map_err(Error::parse)?; - return Ok(Some(AsyncMessage::Notice(error))); - } - BackendMessage::Async(Message::NotificationResponse(body)) => { - let notification = Notification { - process_id: body.process_id(), - channel: body.channel().map_err(Error::parse)?.to_string(), - payload: body.message().map_err(Error::parse)?.to_string(), - }; - return Ok(Some(AsyncMessage::Notification(notification))); - } - BackendMessage::Async(Message::ParameterStatus(body)) => { - self.parameters.insert( - body.name().map_err(Error::parse)?.to_string(), - body.value().map_err(Error::parse)?.to_string(), - ); - continue; - } - BackendMessage::Async(_) => unreachable!(), - BackendMessage::Normal { - messages, - request_complete, - } => (messages, request_complete), - }; - - let mut response = match self.responses.pop_front() { - Some(response) => response, - None => match messages.next().map_err(Error::parse)? { - Some(Message::ErrorResponse(error)) => return Err(Error::db(error)), - _ => return Err(Error::unexpected_message()), - }, - }; - - match response.sender.start_send(messages) { - // if the receiver's hung up we still need to page through the rest of the messages - // designated to it - Ok(AsyncSink::Ready) | Err(_) => { - if !request_complete { - self.responses.push_front(response); - } - } - Ok(AsyncSink::NotReady(messages)) => { - self.responses.push_front(response); - self.pending_response = Some(BackendMessage::Normal { - messages, - request_complete, - }); - trace!("poll_read: waiting on sender"); - return Ok(None); - } - } - } - } - - fn poll_request(&mut self) -> Poll, Error> { - if let Some(message) = self.pending_request.take() { - trace!("retrying pending request"); - return Ok(Async::Ready(Some(message))); - } - - match self.receiver.poll() { - Ok(Async::Ready(Some(request))) => { - trace!("polled new request"); - self.responses.push_back(Response { - sender: request.sender, - _idle: request.idle, - }); - Ok(Async::Ready(Some(request.messages))) - } - Ok(Async::Ready(None)) => Ok(Async::Ready(None)), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(()) => unreachable!("mpsc::Receiver doesn't error"), - } - } - - fn poll_write(&mut self) -> Result { - loop { - if self.state == State::Closing { - trace!("poll_write: done"); - return Ok(false); - } - - let request = match self.poll_request()? { - Async::Ready(Some(request)) => request, - Async::Ready(None) if self.responses.is_empty() && self.state == State::Active => { - trace!("poll_write: at eof, terminating"); - self.state = State::Terminating; - let mut request = vec![]; - frontend::terminate(&mut request); - RequestMessages::Single(FrontendMessage::Raw(request)) - } - Async::Ready(None) => { - trace!( - "poll_write: at eof, pending responses {}", - self.responses.len(), - ); - return Ok(true); - } - Async::NotReady => { - trace!("poll_write: waiting on request"); - return Ok(true); - } - }; - - match request { - RequestMessages::Single(request) => { - match self.stream.start_send(request).map_err(Error::io)? 
{ - AsyncSink::Ready => { - if self.state == State::Terminating { - trace!("poll_write: sent eof, closing"); - self.state = State::Closing; - } - } - AsyncSink::NotReady(request) => { - trace!("poll_write: waiting on socket"); - self.pending_request = Some(RequestMessages::Single(request)); - return Ok(false); - } - } - } - RequestMessages::CopyIn { - mut receiver, - mut pending_message, - } => { - let message = match pending_message.take() { - Some(message) => message, - None => match receiver.poll() { - Ok(Async::Ready(Some(message))) => message, - Ok(Async::Ready(None)) => { - trace!("poll_write: finished copy_in request"); - continue; - } - Ok(Async::NotReady) => { - trace!("poll_write: waiting on copy_in stream"); - self.pending_request = Some(RequestMessages::CopyIn { - receiver, - pending_message, - }); - return Ok(true); - } - Err(()) => unreachable!("mpsc::Receiver doesn't return errors"), - }, - }; - - match self.stream.start_send(message).map_err(Error::io)? { - AsyncSink::Ready => { - self.pending_request = Some(RequestMessages::CopyIn { - receiver, - pending_message: None, - }); - } - AsyncSink::NotReady(message) => { - trace!("poll_write: waiting on socket"); - self.pending_request = Some(RequestMessages::CopyIn { - receiver, - pending_message: Some(message), - }); - return Ok(false); - } - }; - } - } - } - } - - fn poll_flush(&mut self) -> Result<(), Error> { - match self.stream.poll_complete().map_err(Error::io)? { - Async::Ready(()) => trace!("poll_flush: flushed"), - Async::NotReady => trace!("poll_flush: waiting on socket"), - } - Ok(()) - } - - fn poll_shutdown(&mut self) -> Poll<(), Error> { - if self.state != State::Closing { - return Ok(Async::NotReady); - } - - match self.stream.close().map_err(Error::io)? { - Async::Ready(()) => { - trace!("poll_shutdown: complete"); - Ok(Async::Ready(())) - } - Async::NotReady => { - trace!("poll_shutdown: waiting on socket"); - Ok(Async::NotReady) - } - } - } - - pub fn poll_message(&mut self) -> Poll, Error> { - let message = self.poll_read()?; - let want_flush = self.poll_write()?; - if want_flush { - self.poll_flush()?; - } - match message { - Some(message) => Ok(Async::Ready(Some(message))), - None => self.poll_shutdown().map(|r| r.map(|()| None)), - } - } -} - -impl Future for Connection -where - S: AsyncRead + AsyncWrite, -{ - type Item = (); - type Error = Error; - - fn poll(&mut self) -> Poll<(), Error> { - while let Some(_) = try_ready!(self.poll_message()) {} - Ok(Async::Ready(())) - } -} diff --git a/tokio-postgres/src/proto/copy_in.rs b/tokio-postgres/src/proto/copy_in.rs deleted file mode 100644 index 762f1c462..000000000 --- a/tokio-postgres/src/proto/copy_in.rs +++ /dev/null @@ -1,270 +0,0 @@ -use bytes::{Buf, BufMut, BytesMut, IntoBuf}; -use futures::sink; -use futures::stream; -use futures::sync::mpsc; -use futures::{try_ready, Async, AsyncSink, Future, Poll, Sink, Stream}; -use postgres_protocol::message::backend::Message; -use postgres_protocol::message::frontend::{self, CopyData}; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::error::Error as StdError; - -use crate::proto::client::{Client, PendingRequest}; -use crate::proto::codec::FrontendMessage; -use crate::proto::responses::Responses; -use crate::proto::statement::Statement; -use crate::Error; - -pub enum CopyMessage { - Message(FrontendMessage), - Done, -} - -pub struct CopyInReceiver { - receiver: mpsc::Receiver, - done: bool, -} - -impl CopyInReceiver { - pub fn new(receiver: mpsc::Receiver) -> CopyInReceiver { - 
CopyInReceiver { - receiver, - done: false, - } - } -} - -impl Stream for CopyInReceiver { - type Item = FrontendMessage; - type Error = (); - - fn poll(&mut self) -> Poll, ()> { - if self.done { - return Ok(Async::Ready(None)); - } - - match self.receiver.poll()? { - Async::Ready(Some(CopyMessage::Message(message))) => Ok(Async::Ready(Some(message))), - Async::Ready(Some(CopyMessage::Done)) => { - self.done = true; - let mut buf = vec![]; - frontend::copy_done(&mut buf); - frontend::sync(&mut buf); - Ok(Async::Ready(Some(FrontendMessage::Raw(buf)))) - } - Async::Ready(None) => { - self.done = true; - let mut buf = vec![]; - frontend::copy_fail("", &mut buf).unwrap(); - frontend::sync(&mut buf); - Ok(Async::Ready(Some(FrontendMessage::Raw(buf)))) - } - Async::NotReady => Ok(Async::NotReady), - } - } -} - -#[derive(StateMachineFuture)] -pub enum CopyIn -where - S: Stream, - S::Item: IntoBuf, - ::Buf: 'static + Send, - S::Error: Into>, -{ - #[state_machine_future(start, transitions(ReadCopyInResponse))] - Start { - client: Client, - request: PendingRequest, - statement: Statement, - stream: S, - sender: mpsc::Sender, - }, - #[state_machine_future(transitions(WriteCopyData))] - ReadCopyInResponse { - stream: S, - sender: mpsc::Sender, - receiver: Responses, - }, - #[state_machine_future(transitions(WriteCopyDone))] - WriteCopyData { - stream: stream::Fuse, - buf: BytesMut, - pending_message: Option, - sender: mpsc::Sender, - receiver: Responses, - }, - #[state_machine_future(transitions(ReadCommandComplete))] - WriteCopyDone { - future: sink::Send>, - receiver: Responses, - }, - #[state_machine_future(transitions(Finished))] - ReadCommandComplete { receiver: Responses }, - #[state_machine_future(ready)] - Finished(u64), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollCopyIn for CopyIn -where - S: Stream, - S::Item: IntoBuf, - ::Buf: 'static + Send, - S::Error: Into>, -{ - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let state = state.take(); - let receiver = state.client.send(state.request)?; - - // the statement can drop after this point, since its close will queue up after the copy - transition!(ReadCopyInResponse { - stream: state.stream, - sender: state.sender, - receiver - }) - } - - fn poll_read_copy_in_response<'a>( - state: &'a mut RentToOwn<'a, ReadCopyInResponse>, - ) -> Poll, Error> { - loop { - let message = try_ready!(state.receiver.poll()); - - match message { - Some(Message::BindComplete) => {} - Some(Message::CopyInResponse(_)) => { - let state = state.take(); - transition!(WriteCopyData { - stream: state.stream.fuse(), - buf: BytesMut::new(), - pending_message: None, - sender: state.sender, - receiver: state.receiver - }) - } - Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), - Some(_) => return Err(Error::unexpected_message()), - None => return Err(Error::closed()), - } - } - } - - fn poll_write_copy_data<'a>( - state: &'a mut RentToOwn<'a, WriteCopyData>, - ) -> Poll { - if let Some(message) = state.pending_message.take() { - match state - .sender - .start_send(message) - .map_err(|_| Error::closed())? 
- { - AsyncSink::Ready => {} - AsyncSink::NotReady(message) => { - state.pending_message = Some(message); - return Ok(Async::NotReady); - } - } - } - - loop { - let buf: Box = loop { - match try_ready!(state.stream.poll().map_err(Error::copy_in_stream)) { - Some(buf) => { - let buf = buf.into_buf(); - if buf.remaining() > 4096 { - if state.buf.is_empty() { - break Box::new(buf); - } else { - let cur_buf = state.buf.take().freeze().into_buf(); - break Box::new(cur_buf.chain(buf)); - } - } - - state.buf.reserve(buf.remaining()); - state.buf.put(buf); - if state.buf.len() > 4096 { - break Box::new(state.buf.take().freeze().into_buf()); - } - } - None => break Box::new(state.buf.take().freeze().into_buf()), - } - }; - - if buf.has_remaining() { - let data = CopyData::new(buf).map_err(Error::encode)?; - let message = CopyMessage::Message(FrontendMessage::CopyData(data)); - - match state - .sender - .start_send(message) - .map_err(|_| Error::closed())? - { - AsyncSink::Ready => {} - AsyncSink::NotReady(message) => { - state.pending_message = Some(message); - return Ok(Async::NotReady); - } - } - } else { - let state = state.take(); - transition!(WriteCopyDone { - future: state.sender.send(CopyMessage::Done), - receiver: state.receiver, - }); - } - } - } - - fn poll_write_copy_done<'a>( - state: &'a mut RentToOwn<'a, WriteCopyDone>, - ) -> Poll { - try_ready!(state.future.poll().map_err(|_| Error::closed())); - let state = state.take(); - - transition!(ReadCommandComplete { - receiver: state.receiver - }) - } - - fn poll_read_command_complete<'a>( - state: &'a mut RentToOwn<'a, ReadCommandComplete>, - ) -> Poll { - let message = try_ready!(state.receiver.poll()); - - match message { - Some(Message::CommandComplete(body)) => { - let rows = body - .tag() - .map_err(Error::parse)? 
- .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); - transition!(Finished(rows)) - } - Some(Message::ErrorResponse(body)) => Err(Error::db(body)), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } -} - -impl CopyInFuture -where - S: Stream, - S::Item: IntoBuf, - ::Buf: Send, - S::Error: Into>, -{ - pub fn new( - client: Client, - request: PendingRequest, - statement: Statement, - stream: S, - sender: mpsc::Sender, - ) -> CopyInFuture { - CopyIn::start(client, request, statement, stream, sender) - } -} diff --git a/tokio-postgres/src/proto/copy_out.rs b/tokio-postgres/src/proto/copy_out.rs deleted file mode 100644 index 1ae714188..000000000 --- a/tokio-postgres/src/proto/copy_out.rs +++ /dev/null @@ -1,105 +0,0 @@ -use bytes::Bytes; -use futures::{Async, Poll, Stream}; -use postgres_protocol::message::backend::Message; -use std::mem; - -use crate::proto::client::{Client, PendingRequest}; -use crate::proto::responses::Responses; -use crate::proto::statement::Statement; -use crate::Error; - -enum State { - Start { - client: Client, - request: PendingRequest, - statement: Statement, - }, - ReadingCopyOutResponse { - receiver: Responses, - }, - ReadingCopyData { - receiver: Responses, - }, - Done, -} - -pub struct CopyOutStream(State); - -impl Stream for CopyOutStream { - type Item = Bytes; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - loop { - match mem::replace(&mut self.0, State::Done) { - State::Start { - client, - request, - statement, - } => { - let receiver = client.send(request)?; - // it's ok for the statement to close now that we've queued the query - drop(statement); - self.0 = State::ReadingCopyOutResponse { receiver }; - } - State::ReadingCopyOutResponse { mut receiver } => { - let message = match receiver.poll() { - Ok(Async::Ready(message)) => message, - Ok(Async::NotReady) => { - self.0 = State::ReadingCopyOutResponse { receiver }; - break Ok(Async::NotReady); - } - Err(e) => return Err(e), - }; - - match message { - Some(Message::BindComplete) => { - self.0 = State::ReadingCopyOutResponse { receiver }; - } - Some(Message::CopyOutResponse(_)) => { - self.0 = State::ReadingCopyData { receiver }; - } - Some(Message::ErrorResponse(body)) => break Err(Error::db(body)), - Some(_) => break Err(Error::unexpected_message()), - None => break Err(Error::closed()), - } - } - State::ReadingCopyData { mut receiver } => { - let message = match receiver.poll() { - Ok(Async::Ready(message)) => message, - Ok(Async::NotReady) => { - self.0 = State::ReadingCopyData { receiver }; - break Ok(Async::NotReady); - } - Err(e) => return Err(e), - }; - - match message { - Some(Message::CopyData(body)) => { - self.0 = State::ReadingCopyData { receiver }; - break Ok(Async::Ready(Some(body.into_bytes()))); - } - Some(Message::CopyDone) | Some(Message::CommandComplete(_)) => { - self.0 = State::ReadingCopyData { receiver }; - } - Some(Message::ReadyForQuery(_)) => break Ok(Async::Ready(None)), - Some(Message::ErrorResponse(body)) => break Err(Error::db(body)), - Some(_) => break Err(Error::unexpected_message()), - None => break Err(Error::closed()), - } - } - State::Done => break Ok(Async::Ready(None)), - } - } - } -} - -impl CopyOutStream { - pub fn new(client: Client, request: PendingRequest, statement: Statement) -> CopyOutStream { - CopyOutStream(State::Start { - client, - request, - statement, - }) - } -} diff --git a/tokio-postgres/src/proto/execute.rs b/tokio-postgres/src/proto/execute.rs deleted file mode 100644 index 
0f8e021fe..000000000 --- a/tokio-postgres/src/proto/execute.rs +++ /dev/null @@ -1,68 +0,0 @@ -use futures::{try_ready, Poll, Stream}; -use postgres_protocol::message::backend::Message; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; - -use crate::proto::client::{Client, PendingRequest}; -use crate::proto::responses::Responses; -use crate::proto::statement::Statement; -use crate::Error; - -#[derive(StateMachineFuture)] -pub enum Execute { - #[state_machine_future(start, transitions(ReadResponse))] - Start { - client: Client, - request: PendingRequest, - statement: Statement, - }, - #[state_machine_future(transitions(Finished))] - ReadResponse { receiver: Responses }, - #[state_machine_future(ready)] - Finished(u64), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollExecute for Execute { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); - let receiver = state.client.send(state.request)?; - - // the statement can drop after this point, since its close will queue up after the execution - transition!(ReadResponse { receiver }) - } - - fn poll_read_response<'a>( - state: &'a mut RentToOwn<'a, ReadResponse>, - ) -> Poll { - loop { - let message = try_ready!(state.receiver.poll()); - - match message { - Some(Message::BindComplete) => {} - Some(Message::DataRow(_)) => {} - Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), - Some(Message::CommandComplete(body)) => { - let rows = body - .tag() - .map_err(Error::parse)? - .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); - transition!(Finished(rows)) - } - Some(Message::EmptyQueryResponse) => transition!(Finished(0)), - Some(_) => return Err(Error::unexpected_message()), - None => return Err(Error::closed()), - } - } - } -} - -impl ExecuteFuture { - pub fn new(client: Client, request: PendingRequest, statement: Statement) -> ExecuteFuture { - Execute::start(client, request, statement) - } -} diff --git a/tokio-postgres/src/proto/idle.rs b/tokio-postgres/src/proto/idle.rs deleted file mode 100644 index d4cbe8f06..000000000 --- a/tokio-postgres/src/proto/idle.rs +++ /dev/null @@ -1,47 +0,0 @@ -use futures::task::AtomicTask; -use futures::{Async, Poll}; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; - -use crate::Error; - -struct Inner { - active: AtomicUsize, - task: AtomicTask, -} - -pub struct IdleState(Arc); - -impl IdleState { - pub fn new() -> IdleState { - IdleState(Arc::new(Inner { - active: AtomicUsize::new(0), - task: AtomicTask::new(), - })) - } - - pub fn guard(&self) -> IdleGuard { - self.0.active.fetch_add(1, Ordering::SeqCst); - IdleGuard(self.0.clone()) - } - - pub fn poll_idle(&self) -> Poll<(), Error> { - self.0.task.register(); - - if self.0.active.load(Ordering::SeqCst) == 0 { - Ok(Async::Ready(())) - } else { - Ok(Async::NotReady) - } - } -} - -pub struct IdleGuard(Arc); - -impl Drop for IdleGuard { - fn drop(&mut self) { - if self.0.active.fetch_sub(1, Ordering::SeqCst) == 1 { - self.0.task.notify(); - } - } -} diff --git a/tokio-postgres/src/proto/maybe_tls_stream.rs b/tokio-postgres/src/proto/maybe_tls_stream.rs deleted file mode 100644 index 928674f28..000000000 --- a/tokio-postgres/src/proto/maybe_tls_stream.rs +++ /dev/null @@ -1,88 +0,0 @@ -use bytes::{Buf, BufMut}; -use futures::Poll; -use std::io::{self, Read, Write}; -use tokio_io::{AsyncRead, AsyncWrite}; - -pub enum MaybeTlsStream { - Raw(T), - Tls(U), -} - -impl Read for MaybeTlsStream -where - T: Read, - U: Read, -{ - fn read(&mut 
self, buf: &mut [u8]) -> io::Result { - match self { - MaybeTlsStream::Raw(s) => s.read(buf), - MaybeTlsStream::Tls(s) => s.read(buf), - } - } -} - -impl AsyncRead for MaybeTlsStream -where - T: AsyncRead, - U: AsyncRead, -{ - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - match self { - MaybeTlsStream::Raw(s) => s.prepare_uninitialized_buffer(buf), - MaybeTlsStream::Tls(s) => s.prepare_uninitialized_buffer(buf), - } - } - - fn read_buf(&mut self, buf: &mut B) -> Poll - where - B: BufMut, - { - match self { - MaybeTlsStream::Raw(s) => s.read_buf(buf), - MaybeTlsStream::Tls(s) => s.read_buf(buf), - } - } -} - -impl Write for MaybeTlsStream -where - T: Write, - U: Write, -{ - fn write(&mut self, buf: &[u8]) -> io::Result { - match self { - MaybeTlsStream::Raw(s) => s.write(buf), - MaybeTlsStream::Tls(s) => s.write(buf), - } - } - - fn flush(&mut self) -> io::Result<()> { - match self { - MaybeTlsStream::Raw(s) => s.flush(), - MaybeTlsStream::Tls(s) => s.flush(), - } - } -} - -impl AsyncWrite for MaybeTlsStream -where - T: AsyncWrite, - U: AsyncWrite, -{ - fn shutdown(&mut self) -> Poll<(), io::Error> { - match self { - MaybeTlsStream::Raw(s) => s.shutdown(), - MaybeTlsStream::Tls(s) => s.shutdown(), - } - } - - fn write_buf(&mut self, buf: &mut B) -> Poll - where - B: Buf, - { - match self { - MaybeTlsStream::Raw(s) => s.write_buf(buf), - MaybeTlsStream::Tls(s) => s.write_buf(buf), - } - } -} diff --git a/tokio-postgres/src/proto/mod.rs b/tokio-postgres/src/proto/mod.rs deleted file mode 100644 index 2979d8a30..000000000 --- a/tokio-postgres/src/proto/mod.rs +++ /dev/null @@ -1,66 +0,0 @@ -macro_rules! try_ready_closed { - ($e:expr) => { - match $e { - Ok(::futures::Async::Ready(v)) => v, - Ok(::futures::Async::NotReady) => return Ok(::futures::Async::NotReady), - Err(_) => return Err(crate::Error::closed()), - } - }; -} - -mod bind; -#[cfg(feature = "runtime")] -mod cancel_query; -mod cancel_query_raw; -mod client; -mod codec; -#[cfg(feature = "runtime")] -mod connect; -#[cfg(feature = "runtime")] -mod connect_once; -mod connect_raw; -#[cfg(feature = "runtime")] -mod connect_socket; -mod connection; -mod copy_in; -mod copy_out; -mod execute; -mod idle; -mod maybe_tls_stream; -mod portal; -mod prepare; -mod query; -mod responses; -mod simple_query; -mod statement; -mod tls; -mod transaction; -mod typeinfo; -mod typeinfo_composite; -mod typeinfo_enum; - -pub use crate::proto::bind::BindFuture; -#[cfg(feature = "runtime")] -pub use crate::proto::cancel_query::CancelQueryFuture; -pub use crate::proto::cancel_query_raw::CancelQueryRawFuture; -pub use crate::proto::client::Client; -pub use crate::proto::codec::{FrontendMessage, PostgresCodec}; -#[cfg(feature = "runtime")] -pub use crate::proto::connect::ConnectFuture; -#[cfg(feature = "runtime")] -pub use crate::proto::connect_once::ConnectOnceFuture; -pub use crate::proto::connect_raw::ConnectRawFuture; -#[cfg(feature = "runtime")] -pub use crate::proto::connect_socket::ConnectSocketFuture; -pub use crate::proto::connection::Connection; -pub use crate::proto::copy_in::CopyInFuture; -pub use crate::proto::copy_out::CopyOutStream; -pub use crate::proto::execute::ExecuteFuture; -pub use crate::proto::maybe_tls_stream::MaybeTlsStream; -pub use crate::proto::portal::Portal; -pub use crate::proto::prepare::PrepareFuture; -pub use crate::proto::query::QueryStream; -pub use crate::proto::simple_query::SimpleQueryStream; -pub use crate::proto::statement::Statement; -pub use crate::proto::tls::TlsFuture; -pub use 
crate::proto::transaction::TransactionFuture; diff --git a/tokio-postgres/src/proto/portal.rs b/tokio-postgres/src/proto/portal.rs deleted file mode 100644 index 26b93e433..000000000 --- a/tokio-postgres/src/proto/portal.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::sync::Arc; - -use crate::proto::client::WeakClient; -use crate::proto::statement::Statement; - -struct Inner { - client: WeakClient, - name: String, - statement: Statement, -} - -impl Drop for Inner { - fn drop(&mut self) { - if let Some(client) = self.client.upgrade() { - client.close_portal(&self.name); - } - } -} - -#[derive(Clone)] -pub struct Portal(Arc); - -impl Portal { - pub fn new(client: WeakClient, name: String, statement: Statement) -> Portal { - Portal(Arc::new(Inner { - client, - name, - statement, - })) - } - - pub fn name(&self) -> &str { - &self.0.name - } - - pub fn statement(&self) -> &Statement { - &self.0.statement - } -} diff --git a/tokio-postgres/src/proto/prepare.rs b/tokio-postgres/src/proto/prepare.rs deleted file mode 100644 index a29aca11b..000000000 --- a/tokio-postgres/src/proto/prepare.rs +++ /dev/null @@ -1,228 +0,0 @@ -#![allow(clippy::large_enum_variant)] - -use fallible_iterator::FallibleIterator; -use futures::{try_ready, Future, Poll, Stream}; -use postgres_protocol::message::backend::Message; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::mem; -use std::vec; - -use crate::proto::client::{Client, PendingRequest}; -use crate::proto::responses::Responses; -use crate::proto::statement::Statement; -use crate::proto::typeinfo::TypeinfoFuture; -use crate::types::{Oid, Type}; -use crate::{Column, Error}; - -#[derive(StateMachineFuture)] -pub enum Prepare { - #[state_machine_future(start, transitions(ReadParseComplete))] - Start { - client: Client, - request: PendingRequest, - name: String, - }, - #[state_machine_future(transitions(ReadParameterDescription))] - ReadParseComplete { - client: Client, - receiver: Responses, - name: String, - }, - #[state_machine_future(transitions(ReadRowDescription))] - ReadParameterDescription { - client: Client, - receiver: Responses, - name: String, - }, - #[state_machine_future(transitions(GetParameterTypes, GetColumnTypes, Finished))] - ReadRowDescription { - client: Client, - receiver: Responses, - name: String, - parameters: Vec, - }, - #[state_machine_future(transitions(GetColumnTypes, Finished))] - GetParameterTypes { - future: TypeinfoFuture, - remaining_parameters: vec::IntoIter, - name: String, - parameters: Vec, - columns: Vec<(String, Oid)>, - }, - #[state_machine_future(transitions(Finished))] - GetColumnTypes { - future: TypeinfoFuture, - cur_column_name: String, - remaining_columns: vec::IntoIter<(String, Oid)>, - name: String, - parameters: Vec, - columns: Vec, - }, - #[state_machine_future(ready)] - Finished(Statement), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollPrepare for Prepare { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); - let receiver = state.client.send(state.request)?; - - transition!(ReadParseComplete { - receiver, - name: state.name, - client: state.client, - }) - } - - fn poll_read_parse_complete<'a>( - state: &'a mut RentToOwn<'a, ReadParseComplete>, - ) -> Poll { - let message = try_ready!(state.receiver.poll()); - let state = state.take(); - - match message { - Some(Message::ParseComplete) => transition!(ReadParameterDescription { - receiver: state.receiver, - name: state.name, - client: state.client, - }), - 
Some(Message::ErrorResponse(body)) => Err(Error::db(body)), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } - - fn poll_read_parameter_description<'a>( - state: &'a mut RentToOwn<'a, ReadParameterDescription>, - ) -> Poll { - let message = try_ready!(state.receiver.poll()); - let state = state.take(); - - match message { - Some(Message::ParameterDescription(body)) => transition!(ReadRowDescription { - receiver: state.receiver, - name: state.name, - parameters: body.parameters().collect().map_err(Error::parse)?, - client: state.client, - }), - Some(_) => Err(Error::unexpected_message()), - None => Err(Error::closed()), - } - } - - fn poll_read_row_description<'a>( - state: &'a mut RentToOwn<'a, ReadRowDescription>, - ) -> Poll { - let message = try_ready!(state.receiver.poll()); - let state = state.take(); - - let columns = match message { - Some(Message::RowDescription(body)) => body - .fields() - .map(|f| Ok((f.name().to_string(), f.type_oid()))) - .collect() - .map_err(Error::parse)?, - Some(Message::NoData) => vec![], - Some(_) => return Err(Error::unexpected_message()), - None => return Err(Error::closed()), - }; - - let mut parameters = state.parameters.into_iter(); - if let Some(oid) = parameters.next() { - transition!(GetParameterTypes { - future: TypeinfoFuture::new(oid, state.client), - remaining_parameters: parameters, - name: state.name, - parameters: vec![], - columns: columns, - }); - } - - let mut columns = columns.into_iter(); - if let Some((name, oid)) = columns.next() { - transition!(GetColumnTypes { - future: TypeinfoFuture::new(oid, state.client), - cur_column_name: name, - remaining_columns: columns, - name: state.name, - parameters: vec![], - columns: vec![], - }); - } - - transition!(Finished(Statement::new( - state.client.downgrade(), - state.name, - vec![], - vec![] - ))) - } - - fn poll_get_parameter_types<'a>( - state: &'a mut RentToOwn<'a, GetParameterTypes>, - ) -> Poll { - let client = loop { - let (ty, client) = try_ready!(state.future.poll()); - state.parameters.push(ty); - - match state.remaining_parameters.next() { - Some(oid) => state.future = TypeinfoFuture::new(oid, client), - None => break client, - } - }; - let state = state.take(); - - let mut columns = state.columns.into_iter(); - if let Some((name, oid)) = columns.next() { - transition!(GetColumnTypes { - future: TypeinfoFuture::new(oid, client), - cur_column_name: name, - remaining_columns: columns, - name: state.name, - parameters: state.parameters, - columns: vec![], - }) - } - - transition!(Finished(Statement::new( - client.downgrade(), - state.name, - state.parameters, - vec![], - ))) - } - - fn poll_get_column_types<'a>( - state: &'a mut RentToOwn<'a, GetColumnTypes>, - ) -> Poll { - let client = loop { - let (ty, client) = try_ready!(state.future.poll()); - let name = mem::replace(&mut state.cur_column_name, String::new()); - state.columns.push(Column::new(name, ty)); - - match state.remaining_columns.next() { - Some((name, oid)) => { - state.cur_column_name = name; - state.future = TypeinfoFuture::new(oid, client); - } - None => break client, - } - }; - let state = state.take(); - - transition!(Finished(Statement::new( - client.downgrade(), - state.name, - state.parameters, - state.columns, - ))) - } -} - -impl PrepareFuture { - pub fn new(client: Client, request: PendingRequest, name: String) -> PrepareFuture { - Prepare::start(client, request, name) - } -} diff --git a/tokio-postgres/src/proto/query.rs b/tokio-postgres/src/proto/query.rs deleted file 
mode 100644 index 2d84abdee..000000000 --- a/tokio-postgres/src/proto/query.rs +++ /dev/null @@ -1,121 +0,0 @@ -use futures::{Async, Poll, Stream}; -use postgres_protocol::message::backend::Message; -use std::mem; - -use crate::proto::client::{Client, PendingRequest}; -use crate::proto::portal::Portal; -use crate::proto::responses::Responses; -use crate::proto::statement::Statement; -use crate::{Error, Row}; - -pub trait StatementHolder { - fn statement(&self) -> &Statement; -} - -impl StatementHolder for Statement { - fn statement(&self) -> &Statement { - self - } -} - -impl StatementHolder for Portal { - fn statement(&self) -> &Statement { - self.statement() - } -} - -enum State { - Start { - client: Client, - request: PendingRequest, - statement: T, - }, - ReadingResponse { - receiver: Responses, - statement: T, - }, - Done, -} - -pub struct QueryStream(State); - -impl Stream for QueryStream -where - T: StatementHolder, -{ - type Item = Row; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - loop { - match mem::replace(&mut self.0, State::Done) { - State::Start { - client, - request, - statement, - } => { - let receiver = client.send(request)?; - self.0 = State::ReadingResponse { - receiver, - statement, - }; - } - State::ReadingResponse { - mut receiver, - statement, - } => { - let message = match receiver.poll() { - Ok(Async::Ready(message)) => message, - Ok(Async::NotReady) => { - self.0 = State::ReadingResponse { - receiver, - statement, - }; - break Ok(Async::NotReady); - } - Err(e) => return Err(e), - }; - - match message { - Some(Message::BindComplete) => { - self.0 = State::ReadingResponse { - receiver, - statement, - }; - } - Some(Message::ErrorResponse(body)) => break Err(Error::db(body)), - Some(Message::DataRow(body)) => { - let row = Row::new(statement.statement().clone(), body)?; - self.0 = State::ReadingResponse { - receiver, - statement, - }; - break Ok(Async::Ready(Some(row))); - } - Some(Message::EmptyQueryResponse) - | Some(Message::PortalSuspended) - | Some(Message::CommandComplete(_)) => { - break Ok(Async::Ready(None)); - } - Some(_) => break Err(Error::unexpected_message()), - None => break Err(Error::closed()), - } - } - State::Done => break Ok(Async::Ready(None)), - } - } - } -} - -impl QueryStream -where - T: StatementHolder, -{ - pub fn new(client: Client, request: PendingRequest, statement: T) -> QueryStream { - QueryStream(State::Start { - client, - request, - statement, - }) - } -} diff --git a/tokio-postgres/src/proto/responses.rs b/tokio-postgres/src/proto/responses.rs deleted file mode 100644 index 7cc259a83..000000000 --- a/tokio-postgres/src/proto/responses.rs +++ /dev/null @@ -1,42 +0,0 @@ -use fallible_iterator::FallibleIterator; -use futures::sync::mpsc; -use futures::{try_ready, Async, Poll, Stream}; -use postgres_protocol::message::backend; - -use crate::proto::codec::BackendMessages; -use crate::Error; - -pub fn channel() -> (mpsc::Sender, Responses) { - let (sender, receiver) = mpsc::channel(1); - - ( - sender, - Responses { - receiver, - cur: BackendMessages::empty(), - }, - ) -} - -pub struct Responses { - receiver: mpsc::Receiver, - cur: BackendMessages, -} - -impl Stream for Responses { - type Item = backend::Message; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - loop { - if let Some(message) = self.cur.next().map_err(Error::parse)? 
{ - return Ok(Async::Ready(Some(message))); - } - - match try_ready!(self.receiver.poll().map_err(|()| Error::closed())) { - Some(messages) => self.cur = messages, - None => return Ok(Async::Ready(None)), - } - } - } -} diff --git a/tokio-postgres/src/proto/simple_query.rs b/tokio-postgres/src/proto/simple_query.rs deleted file mode 100644 index fdfb52270..000000000 --- a/tokio-postgres/src/proto/simple_query.rs +++ /dev/null @@ -1,113 +0,0 @@ -use fallible_iterator::FallibleIterator; -use futures::{Async, Poll, Stream}; -use postgres_protocol::message::backend::Message; -use std::mem; -use std::sync::Arc; - -use crate::proto::client::{Client, PendingRequest}; -use crate::proto::responses::Responses; -use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; - -pub enum State { - Start { - client: Client, - request: PendingRequest, - }, - ReadResponse { - columns: Option>, - receiver: Responses, - }, - Done, -} - -pub struct SimpleQueryStream(State); - -impl Stream for SimpleQueryStream { - type Item = SimpleQueryMessage; - type Error = Error; - - fn poll(&mut self) -> Poll, Error> { - loop { - match mem::replace(&mut self.0, State::Done) { - State::Start { client, request } => { - let receiver = client.send(request)?; - self.0 = State::ReadResponse { - columns: None, - receiver, - }; - } - State::ReadResponse { - columns, - mut receiver, - } => { - let message = match receiver.poll() { - Ok(Async::Ready(message)) => message, - Ok(Async::NotReady) => { - self.0 = State::ReadResponse { columns, receiver }; - return Ok(Async::NotReady); - } - Err(e) => return Err(e), - }; - - match message { - Some(Message::CommandComplete(body)) => { - let rows = body - .tag() - .map_err(Error::parse)? - .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); - self.0 = State::ReadResponse { - columns: None, - receiver, - }; - return Ok(Async::Ready(Some(SimpleQueryMessage::CommandComplete( - rows, - )))); - } - Some(Message::EmptyQueryResponse) => { - self.0 = State::ReadResponse { - columns: None, - receiver, - }; - return Ok(Async::Ready(Some(SimpleQueryMessage::CommandComplete(0)))); - } - Some(Message::RowDescription(body)) => { - let columns = body - .fields() - .map(|f| Ok(f.name().to_string())) - .collect::>() - .map_err(Error::parse)? 
- .into(); - self.0 = State::ReadResponse { - columns: Some(columns), - receiver, - }; - } - Some(Message::DataRow(body)) => { - let row = match &columns { - Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, - None => return Err(Error::unexpected_message()), - }; - self.0 = State::ReadResponse { columns, receiver }; - return Ok(Async::Ready(Some(SimpleQueryMessage::Row(row)))); - } - Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), - Some(Message::ReadyForQuery(_)) => return Ok(Async::Ready(None)), - Some(_) => return Err(Error::unexpected_message()), - None => return Err(Error::closed()), - } - } - State::Done => return Ok(Async::Ready(None)), - } - } - } -} - -impl SimpleQueryStream { - pub fn new(client: Client, request: PendingRequest) -> SimpleQueryStream { - SimpleQueryStream(State::Start { client, request }) - } -} diff --git a/tokio-postgres/src/proto/statement.rs b/tokio-postgres/src/proto/statement.rs deleted file mode 100644 index 94703a48b..000000000 --- a/tokio-postgres/src/proto/statement.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::sync::Arc; - -use crate::proto::client::WeakClient; -use crate::types::Type; -use crate::Column; - -pub struct StatementInner { - client: WeakClient, - name: String, - params: Vec, - columns: Vec, -} - -impl Drop for StatementInner { - fn drop(&mut self) { - if let Some(client) = self.client.upgrade() { - client.close_statement(&self.name); - } - } -} - -#[derive(Clone)] -pub struct Statement(Arc); - -impl Statement { - pub fn new( - client: WeakClient, - name: String, - params: Vec, - columns: Vec, - ) -> Statement { - Statement(Arc::new(StatementInner { - client, - name, - params, - columns, - })) - } - - pub fn name(&self) -> &str { - &self.0.name - } - - pub fn params(&self) -> &[Type] { - &self.0.params - } - - pub fn columns(&self) -> &[Column] { - &self.0.columns - } -} diff --git a/tokio-postgres/src/proto/tls.rs b/tokio-postgres/src/proto/tls.rs deleted file mode 100644 index 00df2ed19..000000000 --- a/tokio-postgres/src/proto/tls.rs +++ /dev/null @@ -1,118 +0,0 @@ -use futures::{try_ready, Future, Poll}; -use postgres_protocol::message::frontend; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use tokio_io::io::{self, ReadExact, WriteAll}; -use tokio_io::{AsyncRead, AsyncWrite}; - -use crate::config::SslMode; -use crate::proto::MaybeTlsStream; -use crate::tls::private::ForcePrivateApi; -use crate::tls::ChannelBinding; -use crate::{Error, TlsConnect}; - -#[derive(StateMachineFuture)] -pub enum Tls -where - T: TlsConnect, - S: AsyncRead + AsyncWrite, -{ - #[state_machine_future(start, transitions(SendingTls, Ready))] - Start { stream: S, mode: SslMode, tls: T }, - #[state_machine_future(transitions(ReadingTls))] - SendingTls { - future: WriteAll>, - mode: SslMode, - tls: T, - }, - #[state_machine_future(transitions(ConnectingTls, Ready))] - ReadingTls { - future: ReadExact, - mode: SslMode, - tls: T, - }, - #[state_machine_future(transitions(Ready))] - ConnectingTls { future: T::Future }, - #[state_machine_future(ready)] - Ready((MaybeTlsStream, ChannelBinding)), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollTls for Tls -where - T: TlsConnect, - S: AsyncRead + AsyncWrite, -{ - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll, Error> { - let state = state.take(); - - match state.mode { - SslMode::Disable => transition!(Ready(( - MaybeTlsStream::Raw(state.stream), - ChannelBinding::none() - ))), - SslMode::Prefer if !state.tls.can_connect(ForcePrivateApi) 
=> transition!(Ready(( - MaybeTlsStream::Raw(state.stream), - ChannelBinding::none() - ))), - SslMode::Prefer | SslMode::Require => { - let mut buf = vec![]; - frontend::ssl_request(&mut buf); - - transition!(SendingTls { - future: io::write_all(state.stream, buf), - mode: state.mode, - tls: state.tls, - }) - } - SslMode::__NonExhaustive => unreachable!(), - } - } - - fn poll_sending_tls<'a>( - state: &'a mut RentToOwn<'a, SendingTls>, - ) -> Poll, Error> { - let (stream, _) = try_ready!(state.future.poll().map_err(Error::io)); - let state = state.take(); - transition!(ReadingTls { - future: io::read_exact(stream, [0]), - mode: state.mode, - tls: state.tls, - }) - } - - fn poll_reading_tls<'a>( - state: &'a mut RentToOwn<'a, ReadingTls>, - ) -> Poll, Error> { - let (stream, buf) = try_ready!(state.future.poll().map_err(Error::io)); - let state = state.take(); - - if buf[0] == b'S' { - transition!(ConnectingTls { - future: state.tls.connect(stream), - }) - } else if state.mode == SslMode::Require { - Err(Error::tls("server does not support TLS".into())) - } else { - transition!(Ready((MaybeTlsStream::Raw(stream), ChannelBinding::none()))) - } - } - - fn poll_connecting_tls<'a>( - state: &'a mut RentToOwn<'a, ConnectingTls>, - ) -> Poll, Error> { - let (stream, channel_binding) = - try_ready!(state.future.poll().map_err(|e| Error::tls(e.into()))); - transition!(Ready((MaybeTlsStream::Tls(stream), channel_binding))) - } -} - -impl TlsFuture -where - T: TlsConnect, - S: AsyncRead + AsyncWrite, -{ - pub fn new(stream: S, mode: SslMode, tls: T) -> TlsFuture { - Tls::start(stream, mode, tls) - } -} diff --git a/tokio-postgres/src/proto/transaction.rs b/tokio-postgres/src/proto/transaction.rs deleted file mode 100644 index fbd4e5c63..000000000 --- a/tokio-postgres/src/proto/transaction.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::proto::client::Client; -use crate::proto::simple_query::SimpleQueryStream; -use futures::{try_ready, Async, Future, Poll, Stream}; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; - -use crate::Error; - -#[derive(StateMachineFuture)] -pub enum Transaction -where - F: Future, - E: From, -{ - #[state_machine_future(start, transitions(Beginning))] - Start { client: Client, future: F }, - #[state_machine_future(transitions(Running))] - Beginning { - client: Client, - begin: SimpleQueryStream, - future: F, - }, - #[state_machine_future(transitions(Finishing))] - Running { client: Client, future: F }, - #[state_machine_future(transitions(Finished))] - Finishing { - future: SimpleQueryStream, - result: Result, - }, - #[state_machine_future(ready)] - Finished(T), - #[state_machine_future(error)] - Failed(E), -} - -impl PollTransaction for Transaction -where - F: Future, - E: From, -{ - fn poll_start<'a>( - state: &'a mut RentToOwn<'a, Start>, - ) -> Poll, E> { - let state = state.take(); - transition!(Beginning { - begin: state.client.simple_query("BEGIN"), - client: state.client, - future: state.future, - }) - } - - fn poll_beginning<'a>( - state: &'a mut RentToOwn<'a, Beginning>, - ) -> Poll, E> { - while let Some(_) = try_ready!(state.begin.poll()) {} - - let state = state.take(); - transition!(Running { - client: state.client, - future: state.future, - }) - } - - fn poll_running<'a>( - state: &'a mut RentToOwn<'a, Running>, - ) -> Poll, E> { - match state.future.poll() { - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(t)) => transition!(Finishing { - future: state.client.simple_query("COMMIT"), - result: Ok(t), - }), - Err(e) => 
transition!(Finishing { - future: state.client.simple_query("ROLLBACK"), - result: Err(e), - }), - } - } - - fn poll_finishing<'a>( - state: &'a mut RentToOwn<'a, Finishing>, - ) -> Poll, E> { - loop { - match state.future.poll() { - Ok(Async::NotReady) => return Ok(Async::NotReady), - Ok(Async::Ready(Some(_))) => {} - Ok(Async::Ready(None)) => { - let t = state.take().result?; - transition!(Finished(t)) - } - Err(e) => match state.take().result { - Ok(_) => return Err(e.into()), - // prioritize the future's error over the rollback error - Err(e) => return Err(e), - }, - } - } - } -} - -impl TransactionFuture -where - F: Future, - E: From, -{ - pub fn new(client: Client, future: F) -> TransactionFuture { - Transaction::start(client, future) - } -} diff --git a/tokio-postgres/src/proto/typeinfo.rs b/tokio-postgres/src/proto/typeinfo.rs deleted file mode 100644 index 15a657b6c..000000000 --- a/tokio-postgres/src/proto/typeinfo.rs +++ /dev/null @@ -1,336 +0,0 @@ -use futures::stream::{self, Stream}; -use futures::{try_ready, Async, Future, Poll}; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; - -use crate::error::{Error, SqlState}; -use crate::next_statement; -use crate::proto::client::Client; -use crate::proto::prepare::PrepareFuture; -use crate::proto::query::QueryStream; -use crate::proto::statement::Statement; -use crate::proto::typeinfo_composite::TypeinfoCompositeFuture; -use crate::proto::typeinfo_enum::TypeinfoEnumFuture; -use crate::types::{Kind, Oid, ToSql, Type}; - -const TYPEINFO_QUERY: &str = " -SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid -FROM pg_catalog.pg_type t -LEFT OUTER JOIN pg_catalog.pg_range r ON r.rngtypid = t.oid -INNER JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid -WHERE t.oid = $1 -"; - -// Range types weren't added until Postgres 9.2, so pg_range may not exist -const TYPEINFO_FALLBACK_QUERY: &str = " -SELECT t.typname, t.typtype, t.typelem, NULL::OID, t.typbasetype, n.nspname, t.typrelid -FROM pg_catalog.pg_type t -INNER JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid -WHERE t.oid = $1 -"; - -#[derive(StateMachineFuture)] -pub enum Typeinfo { - #[state_machine_future(start, transitions(PreparingTypeinfo, QueryingTypeinfo, Finished))] - Start { oid: Oid, client: Client }, - #[state_machine_future(transitions(PreparingTypeinfoFallback, QueryingTypeinfo))] - PreparingTypeinfo { - future: Box, - oid: Oid, - client: Client, - }, - #[state_machine_future(transitions(QueryingTypeinfo))] - PreparingTypeinfoFallback { - future: Box, - oid: Oid, - client: Client, - }, - #[state_machine_future(transitions( - CachingType, - QueryingEnumVariants, - QueryingDomainBasetype, - QueryingArrayElem, - QueryingCompositeFields, - QueryingRangeSubtype - ))] - QueryingTypeinfo { - future: stream::Collect>, - oid: Oid, - client: Client, - }, - #[state_machine_future(transitions(CachingType))] - QueryingEnumVariants { - future: TypeinfoEnumFuture, - name: String, - oid: Oid, - schema: String, - }, - #[state_machine_future(transitions(CachingType))] - QueryingDomainBasetype { - future: Box, - name: String, - oid: Oid, - schema: String, - }, - #[state_machine_future(transitions(CachingType))] - QueryingArrayElem { - future: Box, - name: String, - oid: Oid, - schema: String, - }, - #[state_machine_future(transitions(CachingType))] - QueryingCompositeFields { - future: TypeinfoCompositeFuture, - name: String, - oid: Oid, - schema: String, - }, - #[state_machine_future(transitions(CachingType))] - 
QueryingRangeSubtype { - future: Box, - name: String, - oid: Oid, - schema: String, - }, - #[state_machine_future(transitions(Finished))] - CachingType { ty: Type, oid: Oid, client: Client }, - #[state_machine_future(ready)] - Finished((Type, Client)), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollTypeinfo for Typeinfo { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); - - if let Some(ty) = Type::from_oid(state.oid) { - transition!(Finished((ty, state.client))); - } - - if let Some(ty) = state.client.cached_type(state.oid) { - transition!(Finished((ty, state.client))); - } - - match state.client.typeinfo_query() { - Some(statement) => transition!(QueryingTypeinfo { - future: state - .client - .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) - .collect(), - oid: state.oid, - client: state.client, - }), - None => transition!(PreparingTypeinfo { - future: Box::new(state.client.prepare(next_statement(), TYPEINFO_QUERY, &[])), - oid: state.oid, - client: state.client, - }), - } - } - - fn poll_preparing_typeinfo<'a>( - state: &'a mut RentToOwn<'a, PreparingTypeinfo>, - ) -> Poll { - let statement = match state.future.poll() { - Ok(Async::Ready(statement)) => statement, - Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => { - let state = state.take(); - - transition!(PreparingTypeinfoFallback { - future: Box::new(state.client.prepare( - next_statement(), - TYPEINFO_FALLBACK_QUERY, - &[] - )), - oid: state.oid, - client: state.client, - }) - } - Err(e) => return Err(e), - }; - let state = state.take(); - - let future = state - .client - .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) - .collect(); - state.client.set_typeinfo_query(&statement); - transition!(QueryingTypeinfo { - future, - oid: state.oid, - client: state.client - }) - } - - fn poll_preparing_typeinfo_fallback<'a>( - state: &'a mut RentToOwn<'a, PreparingTypeinfoFallback>, - ) -> Poll { - let statement = try_ready!(state.future.poll()); - let state = state.take(); - - let future = state - .client - .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) - .collect(); - state.client.set_typeinfo_query(&statement); - transition!(QueryingTypeinfo { - future, - oid: state.oid, - client: state.client - }) - } - - fn poll_querying_typeinfo<'a>( - state: &'a mut RentToOwn<'a, QueryingTypeinfo>, - ) -> Poll { - let rows = try_ready!(state.future.poll()); - let state = state.take(); - - let row = match rows.get(0) { - Some(row) => row, - None => return Err(Error::unexpected_message()), - }; - - let name = row.try_get::<_, String>(0)?; - let type_ = row.try_get::<_, i8>(1)?; - let elem_oid = row.try_get::<_, Oid>(2)?; - let rngsubtype = row.try_get::<_, Option>(3)?; - let basetype = row.try_get::<_, Oid>(4)?; - let schema = row.try_get::<_, String>(5)?; - let relid = row.try_get::<_, Oid>(6)?; - - let kind = if type_ == b'e' as i8 { - transition!(QueryingEnumVariants { - future: TypeinfoEnumFuture::new(state.oid, state.client), - name, - oid: state.oid, - schema, - }) - } else if type_ == b'p' as i8 { - Kind::Pseudo - } else if basetype != 0 { - transition!(QueryingDomainBasetype { - future: Box::new(TypeinfoFuture::new(basetype, state.client)), - name, - oid: state.oid, - schema, - }) - } else if elem_oid != 0 { - transition!(QueryingArrayElem { - future: Box::new(TypeinfoFuture::new(elem_oid, state.client)), - name, - oid: state.oid, - schema, - }) - } else if relid != 0 { - 
transition!(QueryingCompositeFields { - future: TypeinfoCompositeFuture::new(relid, state.client), - name, - oid: state.oid, - schema, - }) - } else if let Some(rngsubtype) = rngsubtype { - transition!(QueryingRangeSubtype { - future: Box::new(TypeinfoFuture::new(rngsubtype, state.client)), - name, - oid: state.oid, - schema, - }) - } else { - Kind::Simple - }; - - let ty = Type::_new(name.to_string(), state.oid, kind, schema.to_string()); - transition!(CachingType { - ty, - oid: state.oid, - client: state.client, - }) - } - - fn poll_querying_enum_variants<'a>( - state: &'a mut RentToOwn<'a, QueryingEnumVariants>, - ) -> Poll { - let (variants, client) = try_ready!(state.future.poll()); - let state = state.take(); - - let ty = Type::_new(state.name, state.oid, Kind::Enum(variants), state.schema); - transition!(CachingType { - ty, - oid: state.oid, - client, - }) - } - - fn poll_querying_domain_basetype<'a>( - state: &'a mut RentToOwn<'a, QueryingDomainBasetype>, - ) -> Poll { - let (basetype, client) = try_ready!(state.future.poll()); - let state = state.take(); - - let ty = Type::_new(state.name, state.oid, Kind::Domain(basetype), state.schema); - transition!(CachingType { - ty, - oid: state.oid, - client, - }) - } - - fn poll_querying_array_elem<'a>( - state: &'a mut RentToOwn<'a, QueryingArrayElem>, - ) -> Poll { - let (elem, client) = try_ready!(state.future.poll()); - let state = state.take(); - - let ty = Type::_new(state.name, state.oid, Kind::Array(elem), state.schema); - transition!(CachingType { - ty, - oid: state.oid, - client, - }) - } - - fn poll_querying_composite_fields<'a>( - state: &'a mut RentToOwn<'a, QueryingCompositeFields>, - ) -> Poll { - let (fields, client) = try_ready!(state.future.poll()); - let state = state.take(); - - let ty = Type::_new(state.name, state.oid, Kind::Composite(fields), state.schema); - transition!(CachingType { - ty, - oid: state.oid, - client, - }) - } - - fn poll_querying_range_subtype<'a>( - state: &'a mut RentToOwn<'a, QueryingRangeSubtype>, - ) -> Poll { - let (subtype, client) = try_ready!(state.future.poll()); - let state = state.take(); - - let ty = Type::_new(state.name, state.oid, Kind::Range(subtype), state.schema); - transition!(CachingType { - ty, - oid: state.oid, - client, - }) - } - - fn poll_caching_type<'a>( - state: &'a mut RentToOwn<'a, CachingType>, - ) -> Poll { - let state = state.take(); - state.client.cache_type(&state.ty); - transition!(Finished((state.ty, state.client))) - } -} - -impl TypeinfoFuture { - pub fn new(oid: Oid, client: Client) -> TypeinfoFuture { - Typeinfo::start(oid, client) - } -} diff --git a/tokio-postgres/src/proto/typeinfo_composite.rs b/tokio-postgres/src/proto/typeinfo_composite.rs deleted file mode 100644 index 31398389d..000000000 --- a/tokio-postgres/src/proto/typeinfo_composite.rs +++ /dev/null @@ -1,150 +0,0 @@ -use futures::stream::{self, Stream}; -use futures::{try_ready, Future, Poll}; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; -use std::mem; -use std::vec; - -use crate::error::Error; -use crate::next_statement; -use crate::proto::client::Client; -use crate::proto::prepare::PrepareFuture; -use crate::proto::query::QueryStream; -use crate::proto::statement::Statement; -use crate::proto::typeinfo::TypeinfoFuture; -use crate::types::{Field, Oid, ToSql}; - -const TYPEINFO_COMPOSITE_QUERY: &str = " -SELECT attname, atttypid -FROM pg_catalog.pg_attribute -WHERE attrelid = $1 -AND NOT attisdropped -AND attnum > 0 -ORDER BY attnum -"; - 
-#[derive(StateMachineFuture)] -pub enum TypeinfoComposite { - #[state_machine_future( - start, - transitions(PreparingTypeinfoComposite, QueryingCompositeFields) - )] - Start { oid: Oid, client: Client }, - #[state_machine_future(transitions(QueryingCompositeFields))] - PreparingTypeinfoComposite { - future: Box, - oid: Oid, - client: Client, - }, - #[state_machine_future(transitions(QueryingCompositeFieldTypes, Finished))] - QueryingCompositeFields { - future: stream::Collect>, - client: Client, - }, - #[state_machine_future(transitions(Finished))] - QueryingCompositeFieldTypes { - future: Box, - cur_field_name: String, - remaining_fields: vec::IntoIter<(String, Oid)>, - fields: Vec, - }, - #[state_machine_future(ready)] - Finished((Vec, Client)), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollTypeinfoComposite for TypeinfoComposite { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); - - match state.client.typeinfo_composite_query() { - Some(statement) => transition!(QueryingCompositeFields { - future: state - .client - .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) - .collect(), - client: state.client, - }), - None => transition!(PreparingTypeinfoComposite { - future: Box::new(state.client.prepare( - next_statement(), - TYPEINFO_COMPOSITE_QUERY, - &[] - )), - oid: state.oid, - client: state.client, - }), - } - } - - fn poll_preparing_typeinfo_composite<'a>( - state: &'a mut RentToOwn<'a, PreparingTypeinfoComposite>, - ) -> Poll { - let statement = try_ready!(state.future.poll()); - let state = state.take(); - - state.client.set_typeinfo_composite_query(&statement); - transition!(QueryingCompositeFields { - future: state - .client - .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) - .collect(), - client: state.client, - }) - } - - fn poll_querying_composite_fields<'a>( - state: &'a mut RentToOwn<'a, QueryingCompositeFields>, - ) -> Poll { - let rows = try_ready!(state.future.poll()); - let state = state.take(); - - let fields = rows - .iter() - .map(|row| { - let name = row.try_get(0)?; - let oid = row.try_get(1)?; - Ok((name, oid)) - }) - .collect::, Error>>()?; - - let mut remaining_fields = fields.into_iter(); - match remaining_fields.next() { - Some((cur_field_name, oid)) => transition!(QueryingCompositeFieldTypes { - future: Box::new(TypeinfoFuture::new(oid, state.client)), - cur_field_name, - fields: vec![], - remaining_fields, - }), - None => transition!(Finished((vec![], state.client))), - } - } - - fn poll_querying_composite_field_types<'a>( - state: &'a mut RentToOwn<'a, QueryingCompositeFieldTypes>, - ) -> Poll { - loop { - let (ty, client) = try_ready!(state.future.poll()); - - let name = mem::replace(&mut state.cur_field_name, String::new()); - state.fields.push(Field::new(name, ty)); - - match state.remaining_fields.next() { - Some((cur_field_name, oid)) => { - state.cur_field_name = cur_field_name; - state.future = Box::new(TypeinfoFuture::new(oid, client)); - } - None => { - let state = state.take(); - transition!(Finished((state.fields, client))); - } - } - } - } -} - -impl TypeinfoCompositeFuture { - pub fn new(oid: Oid, client: Client) -> TypeinfoCompositeFuture { - TypeinfoComposite::start(oid, client) - } -} diff --git a/tokio-postgres/src/proto/typeinfo_enum.rs b/tokio-postgres/src/proto/typeinfo_enum.rs deleted file mode 100644 index dbc391070..000000000 --- a/tokio-postgres/src/proto/typeinfo_enum.rs +++ /dev/null @@ -1,147 +0,0 @@ -use futures::stream::{self, Stream}; 
-use futures::{try_ready, Async, Future, Poll}; -use state_machine_future::{transition, RentToOwn, StateMachineFuture}; - -use crate::error::{Error, SqlState}; -use crate::next_statement; -use crate::proto::client::Client; -use crate::proto::prepare::PrepareFuture; -use crate::proto::query::QueryStream; -use crate::proto::statement::Statement; -use crate::types::{Oid, ToSql}; - -const TYPEINFO_ENUM_QUERY: &str = " -SELECT enumlabel -FROM pg_catalog.pg_enum -WHERE enumtypid = $1 -ORDER BY enumsortorder -"; - -// Postgres 9.0 didn't have enumsortorder -const TYPEINFO_ENUM_FALLBACK_QUERY: &str = " -SELECT enumlabel -FROM pg_catalog.pg_enum -WHERE enumtypid = $1 -ORDER BY oid -"; - -#[derive(StateMachineFuture)] -pub enum TypeinfoEnum { - #[state_machine_future(start, transitions(PreparingTypeinfoEnum, QueryingEnumVariants))] - Start { oid: Oid, client: Client }, - #[state_machine_future(transitions(PreparingTypeinfoEnumFallback, QueryingEnumVariants))] - PreparingTypeinfoEnum { - future: Box, - oid: Oid, - client: Client, - }, - #[state_machine_future(transitions(QueryingEnumVariants))] - PreparingTypeinfoEnumFallback { - future: Box, - oid: Oid, - client: Client, - }, - #[state_machine_future(transitions(Finished))] - QueryingEnumVariants { - future: stream::Collect>, - client: Client, - }, - #[state_machine_future(ready)] - Finished((Vec, Client)), - #[state_machine_future(error)] - Failed(Error), -} - -impl PollTypeinfoEnum for TypeinfoEnum { - fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start>) -> Poll { - let state = state.take(); - - match state.client.typeinfo_enum_query() { - Some(statement) => transition!(QueryingEnumVariants { - future: state - .client - .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) - .collect(), - client: state.client, - }), - None => transition!(PreparingTypeinfoEnum { - future: Box::new( - state - .client - .prepare(next_statement(), TYPEINFO_ENUM_QUERY, &[]) - ), - oid: state.oid, - client: state.client, - }), - } - } - - fn poll_preparing_typeinfo_enum<'a>( - state: &'a mut RentToOwn<'a, PreparingTypeinfoEnum>, - ) -> Poll { - let statement = match state.future.poll() { - Ok(Async::Ready(statement)) => statement, - Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => { - let state = state.take(); - - transition!(PreparingTypeinfoEnumFallback { - future: Box::new(state.client.prepare( - next_statement(), - TYPEINFO_ENUM_FALLBACK_QUERY, - &[] - )), - oid: state.oid, - client: state.client, - }) - } - Err(e) => return Err(e), - }; - let state = state.take(); - - state.client.set_typeinfo_enum_query(&statement); - transition!(QueryingEnumVariants { - future: state - .client - .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) - .collect(), - client: state.client, - }) - } - - fn poll_preparing_typeinfo_enum_fallback<'a>( - state: &'a mut RentToOwn<'a, PreparingTypeinfoEnumFallback>, - ) -> Poll { - let statement = try_ready!(state.future.poll()); - let state = state.take(); - - state.client.set_typeinfo_enum_query(&statement); - transition!(QueryingEnumVariants { - future: state - .client - .query(&statement, [&state.oid as &dyn ToSql].iter().cloned()) - .collect(), - client: state.client, - }) - } - - fn poll_querying_enum_variants<'a>( - state: &'a mut RentToOwn<'a, QueryingEnumVariants>, - ) -> Poll { - let rows = try_ready!(state.future.poll()); - let state = state.take(); - - let variants = rows - .iter() - .map(|row| row.try_get(0)) - .collect::, _>>()?; - - 
transition!(Finished((variants, state.client))) - } -} - -impl TypeinfoEnumFuture { - pub fn new(oid: Oid, client: Client) -> TypeinfoEnumFuture { - TypeinfoEnum::start(oid, client) - } -} diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs deleted file mode 100644 index aeb9c2849..000000000 --- a/tokio-postgres/src/row.rs +++ /dev/null @@ -1,243 +0,0 @@ -//! Rows. - -use fallible_iterator::FallibleIterator; -use postgres_protocol::message::backend::DataRowBody; -use std::fmt; -use std::ops::Range; -use std::str; -use std::sync::Arc; - -use crate::proto; -use crate::row::sealed::{AsName, Sealed}; -use crate::stmt::Column; -use crate::types::{FromSql, Type, WrongType}; -use crate::Error; - -mod sealed { - pub trait Sealed {} - - pub trait AsName { - fn as_name(&self) -> &str; - } -} - -impl AsName for Column { - fn as_name(&self) -> &str { - self.name() - } -} - -impl AsName for String { - fn as_name(&self) -> &str { - self - } -} - -/// A trait implemented by types that can index into columns of a row. -/// -/// This cannot be implemented outside of this crate. -pub trait RowIndex: Sealed { - #[doc(hidden)] - fn __idx(&self, columns: &[T]) -> Option - where - T: AsName; -} - -impl Sealed for usize {} - -impl RowIndex for usize { - #[inline] - fn __idx(&self, columns: &[T]) -> Option - where - T: AsName, - { - if *self >= columns.len() { - None - } else { - Some(*self) - } - } -} - -impl Sealed for str {} - -impl RowIndex for str { - #[inline] - fn __idx(&self, columns: &[T]) -> Option - where - T: AsName, - { - if let Some(idx) = columns.iter().position(|d| d.as_name() == self) { - return Some(idx); - }; - - // FIXME ASCII-only case insensitivity isn't really the right thing to - // do. Postgres itself uses a dubious wrapper around tolower and JDBC - // uses the US locale. - columns - .iter() - .position(|d| d.as_name().eq_ignore_ascii_case(self)) - } -} - -impl<'a, T> Sealed for &'a T where T: ?Sized + Sealed {} - -impl<'a, T> RowIndex for &'a T -where - T: ?Sized + RowIndex, -{ - #[inline] - fn __idx(&self, columns: &[U]) -> Option - where - U: AsName, - { - T::__idx(*self, columns) - } -} - -/// A row of data returned from the database by a query. -pub struct Row { - statement: proto::Statement, - body: DataRowBody, - ranges: Vec>>, -} - -impl Row { - #[allow(clippy::new_ret_no_self)] - pub(crate) fn new(statement: proto::Statement, body: DataRowBody) -> Result { - let ranges = body.ranges().collect().map_err(Error::parse)?; - Ok(Row { - statement, - body, - ranges, - }) - } - - /// Returns information about the columns of data in the row. - pub fn columns(&self) -> &[Column] { - self.statement.columns() - } - - /// Determines if the row contains no values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the number of values in the row. - pub fn len(&self) -> usize { - self.columns().len() - } - - /// Deserializes a value from the row. - /// - /// The value can be specified either by its numeric index in the row, or by its column name. - /// - /// # Panics - /// - /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. - pub fn get<'a, I, T>(&'a self, idx: I) -> T - where - I: RowIndex + fmt::Display, - T: FromSql<'a>, - { - match self.get_inner(&idx) { - Ok(ok) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), - } - } - - /// Like `Row::get`, but returns a `Result` rather than panicking. 
- pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result - where - I: RowIndex, - T: FromSql<'a>, - { - self.get_inner(&idx) - } - - fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result - where - I: RowIndex, - T: FromSql<'a>, - { - let idx = match idx.__idx(self.columns()) { - Some(idx) => idx, - None => return Err(Error::column()), - }; - - let ty = self.columns()[idx].type_(); - if !T::accepts(ty) { - return Err(Error::from_sql(Box::new(WrongType::new(ty.clone())), idx)); - } - - let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); - FromSql::from_sql_nullable(ty, buf).map_err(|e| Error::from_sql(e, idx)) - } -} - -/// A row of data returned from the database by a simple query. -pub struct SimpleQueryRow { - columns: Arc<[String]>, - body: DataRowBody, - ranges: Vec>>, -} - -impl SimpleQueryRow { - #[allow(clippy::new_ret_no_self)] - pub(crate) fn new(columns: Arc<[String]>, body: DataRowBody) -> Result { - let ranges = body.ranges().collect().map_err(Error::parse)?; - Ok(SimpleQueryRow { - columns, - body, - ranges, - }) - } - - /// Determines if the row contains no values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the number of values in the row. - pub fn len(&self) -> usize { - self.columns.len() - } - - /// Returns a value from the row. - /// - /// The value can be specified either by its numeric index in the row, or by its column name. - /// - /// # Panics - /// - /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. - pub fn get(&self, idx: I) -> Option<&str> - where - I: RowIndex + fmt::Display, - { - match self.get_inner(&idx) { - Ok(ok) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), - } - } - - /// Like `SimpleQueryRow::get`, but returns a `Result` rather than panicking. - pub fn try_get(&self, idx: I) -> Result, Error> - where - I: RowIndex, - { - self.get_inner(&idx) - } - - fn get_inner(&self, idx: &I) -> Result, Error> - where - I: RowIndex, - { - let idx = match idx.__idx(&self.columns) { - Some(idx) => idx, - None => return Err(Error::column()), - }; - - let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); - FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(|e| Error::from_sql(e, idx)) - } -} diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs deleted file mode 100644 index 3931fa5e1..000000000 --- a/tokio-postgres/src/socket.rs +++ /dev/null @@ -1,101 +0,0 @@ -use bytes::{Buf, BufMut}; -use futures::Poll; -use std::io::{self, Read, Write}; -use tokio_io::{AsyncRead, AsyncWrite}; -use tokio_tcp::TcpStream; -#[cfg(unix)] -use tokio_uds::UnixStream; - -#[derive(Debug)] -enum Inner { - Tcp(TcpStream), - #[cfg(unix)] - Unix(UnixStream), -} - -/// The standard stream type used by the crate. -/// -/// Requires the `runtime` Cargo feature (enabled by default). 
-#[derive(Debug)] -pub struct Socket(Inner); - -impl Socket { - pub(crate) fn new_tcp(stream: TcpStream) -> Socket { - Socket(Inner::Tcp(stream)) - } - - #[cfg(unix)] - pub(crate) fn new_unix(stream: UnixStream) -> Socket { - Socket(Inner::Unix(stream)) - } -} - -impl Read for Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - match &mut self.0 { - Inner::Tcp(s) => s.read(buf), - #[cfg(unix)] - Inner::Unix(s) => s.read(buf), - } - } -} - -impl AsyncRead for Socket { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { - match &self.0 { - Inner::Tcp(s) => s.prepare_uninitialized_buffer(buf), - #[cfg(unix)] - Inner::Unix(s) => s.prepare_uninitialized_buffer(buf), - } - } - - fn read_buf(&mut self, buf: &mut B) -> Poll - where - B: BufMut, - { - match &mut self.0 { - Inner::Tcp(s) => s.read_buf(buf), - #[cfg(unix)] - Inner::Unix(s) => s.read_buf(buf), - } - } -} - -impl Write for Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - match &mut self.0 { - Inner::Tcp(s) => s.write(buf), - #[cfg(unix)] - Inner::Unix(s) => s.write(buf), - } - } - - fn flush(&mut self) -> io::Result<()> { - match &mut self.0 { - Inner::Tcp(s) => s.flush(), - #[cfg(unix)] - Inner::Unix(s) => s.flush(), - } - } -} - -impl AsyncWrite for Socket { - fn shutdown(&mut self) -> Poll<(), io::Error> { - match &mut self.0 { - Inner::Tcp(s) => s.shutdown(), - #[cfg(unix)] - Inner::Unix(s) => s.shutdown(), - } - } - - fn write_buf(&mut self, buf: &mut B) -> Poll - where - B: Buf, - { - match &mut self.0 { - Inner::Tcp(s) => s.write_buf(buf), - #[cfg(unix)] - Inner::Unix(s) => s.write_buf(buf), - } - } -} diff --git a/tokio-postgres/src/stmt.rs b/tokio-postgres/src/stmt.rs deleted file mode 100644 index bf18dc076..000000000 --- a/tokio-postgres/src/stmt.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::types::Type; - -/// Information about a column of a Postgres query. -#[derive(Debug)] -pub struct Column { - name: String, - type_: Type, -} - -impl Column { - pub(crate) fn new(name: String, type_: Type) -> Column { - Column { name, type_ } - } - - /// Returns the name of the column. - pub fn name(&self) -> &str { - &self.name - } - - /// Returns the type of the column. - pub fn type_(&self) -> &Type { - &self.type_ - } -} diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs deleted file mode 100644 index 811be0a22..000000000 --- a/tokio-postgres/src/tls.rs +++ /dev/null @@ -1,141 +0,0 @@ -//! TLS support. - -use futures::future::{self, FutureResult}; -use futures::{Future, Poll}; -use std::error::Error; -use std::fmt; -use std::io::{self, Read, Write}; -use tokio_io::{AsyncRead, AsyncWrite}; - -pub(crate) mod private { - pub struct ForcePrivateApi; -} - -/// Channel binding information returned from a TLS handshake. -pub struct ChannelBinding { - pub(crate) tls_server_end_point: Option>, -} - -impl ChannelBinding { - /// Creates a `ChannelBinding` containing no information. - pub fn none() -> ChannelBinding { - ChannelBinding { - tls_server_end_point: None, - } - } - - /// Creates a `ChannelBinding` containing `tls-server-end-point` channel binding information. - pub fn tls_server_end_point(tls_server_end_point: Vec) -> ChannelBinding { - ChannelBinding { - tls_server_end_point: Some(tls_server_end_point), - } - } -} - -/// A constructor of `TlsConnect`ors. -/// -/// Requires the `runtime` Cargo feature (enabled by default). -#[cfg(feature = "runtime")] -pub trait MakeTlsConnect { - /// The stream type created by the `TlsConnect` implementation. 
- type Stream: AsyncRead + AsyncWrite; - /// The `TlsConnect` implementation created by this type. - type TlsConnect: TlsConnect; - /// The error type retured by the `TlsConnect` implementation. - type Error: Into>; - - /// Creates a new `TlsConnect`or. - /// - /// The domain name is provided for certificate verification and SNI. - fn make_tls_connect(&mut self, domain: &str) -> Result; -} - -/// An asynchronous function wrapping a stream in a TLS session. -pub trait TlsConnect { - /// The stream returned by the future. - type Stream: AsyncRead + AsyncWrite; - /// The error type returned by the future. - type Error: Into>; - /// The future returned by the connector. - type Future: Future; - - /// Returns a future performing a TLS handshake over the stream. - fn connect(self, stream: S) -> Self::Future; - - #[doc(hidden)] - fn can_connect(&self, _: private::ForcePrivateApi) -> bool { - true - } -} - -/// A `MakeTlsConnect` and `TlsConnect` implementation which simply returns an error. -/// -/// This can be used when `sslmode` is `none` or `prefer`. -#[derive(Debug, Copy, Clone)] -pub struct NoTls; - -#[cfg(feature = "runtime")] -impl MakeTlsConnect for NoTls where { - type Stream = NoTlsStream; - type TlsConnect = NoTls; - type Error = NoTlsError; - - fn make_tls_connect(&mut self, _: &str) -> Result { - Ok(NoTls) - } -} - -impl TlsConnect for NoTls { - type Stream = NoTlsStream; - type Error = NoTlsError; - type Future = FutureResult<(NoTlsStream, ChannelBinding), NoTlsError>; - - fn connect(self, _: S) -> FutureResult<(NoTlsStream, ChannelBinding), NoTlsError> { - future::err(NoTlsError(())) - } - - fn can_connect(&self, _: private::ForcePrivateApi) -> bool { - false - } -} - -/// The TLS "stream" type produced by the `NoTls` connector. -/// -/// Since `NoTls` doesn't support TLS, this type is uninhabited. -pub enum NoTlsStream {} - -impl Read for NoTlsStream { - fn read(&mut self, _: &mut [u8]) -> io::Result { - match *self {} - } -} - -impl AsyncRead for NoTlsStream {} - -impl Write for NoTlsStream { - fn write(&mut self, _: &[u8]) -> io::Result { - match *self {} - } - - fn flush(&mut self) -> io::Result<()> { - match *self {} - } -} - -impl AsyncWrite for NoTlsStream { - fn shutdown(&mut self) -> Poll<(), io::Error> { - match *self {} - } -} - -/// The error returned by `NoTls`. -#[derive(Debug)] -pub struct NoTlsError(()); - -impl fmt::Display for NoTlsError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.write_str("no TLS implementation configured") - } -} - -impl Error for NoTlsError {} From 89501f66d946f430cc45f1397728a12b23b40afc Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Jul 2019 21:42:42 -0700 Subject: [PATCH 188/819] Start on std::futures rewrite connect_raw works! 
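
For reference, a rough sketch of how the new pieces are expected to fit
together (illustrative only: the socket address and TCP connection setup are
assumed, and nothing drives the returned Connection yet in this commit):

    use tokio::net::TcpStream;
    use tokio_postgres::tls::NoTls;
    use tokio_postgres::Config;

    async fn sketch() -> Result<(), Box<dyn std::error::Error>> {
        // Parse a libpq-style connection string into a Config.
        let config: Config = "user=postgres dbname=postgres".parse()?;

        // connect_raw takes any AsyncRead + AsyncWrite stream and ignores the
        // host/port settings, so the socket is opened by hand here.
        let addr: std::net::SocketAddr = "127.0.0.1:5432".parse()?;
        let stream = TcpStream::connect(&addr).await?;

        // Negotiates TLS (a no-op for NoTls under the default prefer mode),
        // then performs the startup and authentication handshake, yielding a
        // Client and a Connection.
        let (_client, _connection) = config.connect_raw(stream, NoTls).await?;
        Ok(())
    }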
--- tokio-postgres/Cargo.toml | 11 +- tokio-postgres/src/client.rs | 22 + tokio-postgres/src/codec.rs | 99 +++ tokio-postgres/src/config.rs | 793 +++++++++++++++++++++++++ tokio-postgres/src/connect.rs | 1 + tokio-postgres/src/connect_raw.rs | 323 ++++++++++ tokio-postgres/src/connect_tls.rs | 48 ++ tokio-postgres/src/connection.rs | 34 ++ tokio-postgres/src/lib.rs | 14 + tokio-postgres/src/maybe_tls_stream.rs | 97 +++ tokio-postgres/src/tls.rs | 144 +++++ tokio-postgres/tests/test/main.rs | 150 ++--- 12 files changed, 1633 insertions(+), 103 deletions(-) create mode 100644 tokio-postgres/src/client.rs create mode 100644 tokio-postgres/src/codec.rs create mode 100644 tokio-postgres/src/config.rs create mode 100644 tokio-postgres/src/connect.rs create mode 100644 tokio-postgres/src/connect_raw.rs create mode 100644 tokio-postgres/src/connect_tls.rs create mode 100644 tokio-postgres/src/connection.rs create mode 100644 tokio-postgres/src/maybe_tls_stream.rs create mode 100644 tokio-postgres/src/tls.rs diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f7a5cc1ce..7fbf32afc 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -21,7 +21,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio-tcp", "tokio-timer", "tokio-uds", "tokio-threadpool", "lazy_static"] +runtime = ["tokio/rt-full", "tokio/tcp", "tokio/uds", "tokio-threadpool", "lazy_static"] "with-bit-vec-0_5" = ["bit-vec-05"] "with-chrono-0_4" = ["chrono-04"] @@ -34,17 +34,15 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] antidote = "1.0" bytes = "0.4" fallible-iterator = "0.2" +futures-preview = "0.3.0-alpha.17" log = "0.4" percent-encoding = "1.0" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } -tokio-codec = { git = "https://github.com/tokio-rs/tokio" } -tokio-io = { git = "https://github.com/tokio-rs/tokio" } +tokio = { git = "https://github.com/tokio-rs/tokio", default-features = false, features = ["io", "codec"] } -tokio-tcp = { git = "https://github.com/tokio-rs/tokio", optional = true } tokio-threadpool = { git = "https://github.com/tokio-rs/tokio", optional = true } lazy_static = { version = "1.0", optional = true } -tokio-timer = { git = "https://github.com/tokio-rs/tokio", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } @@ -54,9 +52,6 @@ serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } -[target.'cfg(unix)'.dependencies] -tokio-uds = { git = "https://github.com/tokio-rs/tokio", optional = true } - [dev-dependencies] tokio = { git = "https://github.com/tokio-rs/tokio" } env_logger = "0.5" diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs new file mode 100644 index 000000000..3bfd7e122 --- /dev/null +++ b/tokio-postgres/src/client.rs @@ -0,0 +1,22 @@ +use crate::connection::Request; +use futures::channel::mpsc; + +pub struct Client { + sender: mpsc::UnboundedSender, + process_id: i32, + secret_key: i32, +} + +impl Client { + pub(crate) fn new( + sender: mpsc::UnboundedSender, + process_id: i32, + secret_key: i32, + ) -> Client { + Client { + sender, + process_id, + secret_key, + } + } +} diff --git a/tokio-postgres/src/codec.rs b/tokio-postgres/src/codec.rs new file mode 100644 index 000000000..4e56459aa 
--- /dev/null +++ b/tokio-postgres/src/codec.rs @@ -0,0 +1,99 @@ +use bytes::{Buf, BytesMut}; +use fallible_iterator::FallibleIterator; +use postgres_protocol::message::backend; +use postgres_protocol::message::frontend::CopyData; +use std::io; +use tokio::codec::{Decoder, Encoder}; + +pub enum FrontendMessage { + Raw(Vec), + CopyData(CopyData>), +} + +pub enum BackendMessage { + Normal { + messages: BackendMessages, + request_complete: bool, + }, + Async(backend::Message), +} + +pub struct BackendMessages(BytesMut); + +impl BackendMessages { + pub fn empty() -> BackendMessages { + BackendMessages(BytesMut::new()) + } +} + +impl FallibleIterator for BackendMessages { + type Item = backend::Message; + type Error = io::Error; + + fn next(&mut self) -> io::Result> { + backend::Message::parse(&mut self.0) + } +} + +pub struct PostgresCodec; + +impl Encoder for PostgresCodec { + type Item = FrontendMessage; + type Error = io::Error; + + fn encode(&mut self, item: FrontendMessage, dst: &mut BytesMut) -> io::Result<()> { + match item { + FrontendMessage::Raw(buf) => dst.extend_from_slice(&buf), + FrontendMessage::CopyData(data) => data.write(dst), + } + + Ok(()) + } +} + +impl Decoder for PostgresCodec { + type Item = BackendMessage; + type Error = io::Error; + + fn decode(&mut self, src: &mut BytesMut) -> Result, io::Error> { + let mut idx = 0; + let mut request_complete = false; + + while let Some(header) = backend::Header::parse(&src[idx..])? { + let len = header.len() as usize + 1; + if src[idx..].len() < len { + break; + } + + match header.tag() { + backend::NOTICE_RESPONSE_TAG + | backend::NOTIFICATION_RESPONSE_TAG + | backend::PARAMETER_STATUS_TAG => { + if idx == 0 { + let message = backend::Message::parse(src)?.unwrap(); + return Ok(Some(BackendMessage::Async(message))); + } else { + break; + } + } + _ => {} + } + + idx += len; + + if header.tag() == backend::READY_FOR_QUERY_TAG { + request_complete = true; + break; + } + } + + if idx == 0 { + Ok(None) + } else { + Ok(Some(BackendMessage::Normal { + messages: BackendMessages(src.split_to(idx)), + request_complete, + })) + } + } +} diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs new file mode 100644 index 000000000..bac19e564 --- /dev/null +++ b/tokio-postgres/src/config.rs @@ -0,0 +1,793 @@ +//! Connection configuration. + +use crate::connect_raw::connect_raw; +use crate::tls::TlsConnect; +use crate::{Client, Connection, Error}; +use std::borrow::Cow; +#[cfg(unix)] +use std::ffi::OsStr; +#[cfg(unix)] +use std::os::unix::ffi::OsStrExt; +#[cfg(unix)] +use std::path::{Path, PathBuf}; +use std::str; +use std::str::FromStr; +use std::time::Duration; +use std::{error, fmt, iter, mem}; +use tokio::io::{AsyncRead, AsyncWrite}; + +/// Properties required of a session. +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum TargetSessionAttrs { + /// No special properties are required. + Any, + /// The session must allow writes. + ReadWrite, + #[doc(hidden)] + __NonExhaustive, +} + +/// TLS configuration. +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum SslMode { + /// Do not use TLS. + Disable, + /// Attempt to connect with TLS but allow sessions without. + Prefer, + /// Require the use of TLS. + Require, + #[doc(hidden)] + __NonExhaustive, +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum Host { + Tcp(String), + #[cfg(unix)] + Unix(PathBuf), +} + +/// Connection configuration. +/// +/// Configuration can be parsed from libpq-style connection strings. 
These strings come in two formats: +/// +/// # Key-Value +/// +/// This format consists of space-separated key-value pairs. Values which are either the empty string or contain +/// whitespace should be wrapped in `'`. `'` and `\` characters should be backslash-escaped. +/// +/// ## Keys +/// +/// * `user` - The username to authenticate with. Required. +/// * `password` - The password to authenticate with. +/// * `dbname` - The name of the database to connect to. Defaults to the username. +/// * `options` - Command line options used to configure the server. +/// * `application_name` - Sets the `application_name` parameter on the server. +/// * `sslmode` - Controls usage of TLS. If set to `disable`, TLS will not be used. If set to `prefer`, TLS will be used +/// if available, but not used otherwise. If set to `require`, TLS will be forced to be used. Defaults to `prefer`. +/// * `host` - The host to connect to. On Unix platforms, if the host starts with a `/` character it is treated as the +/// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts +/// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting +/// with the `connect` method. +/// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be +/// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if +/// omitted or the empty string. +/// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. Note that hostnames +/// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. +/// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. +/// This option is ignored when connecting with Unix sockets. Defaults to on. +/// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. +/// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. +/// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that +/// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server +/// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. +/// +/// ## Examples +/// +/// ```not_rust +/// host=localhost user=postgres connect_timeout=10 keepalives=0 +/// ``` +/// +/// ```not_rust +/// host=/var/lib/postgresql,localhost port=1234 user=postgres password='password with spaces' +/// ``` +/// +/// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write +/// ``` +/// +/// # Url +/// +/// This format resembles a URL with a scheme of either `postgres://` or `postgresql://`. All components are optional, +/// and the format accept query parameters for all of the key-value pairs described in the section above. Multiple +/// host/port pairs can be comma-separated. Unix socket paths in the host section of the URL should be percent-encoded, +/// as the path component of the URL specifies the database name. 
+/// +/// ## Examples +/// +/// ```not_rust +/// postgresql://user@localhost +/// ``` +/// +/// ```not_rust +/// postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10 +/// ``` +/// +/// ```not_rust +/// postgresql://user@host1:1234,host2,host3:5678?target_session_attrs=read-write +/// ``` +/// +/// ```not_rust +/// postgresql:///mydb?user=user&host=/var/lib/postgresql +/// ``` +#[derive(PartialEq)] +pub struct Config { + pub(crate) user: Option, + pub(crate) password: Option>, + pub(crate) dbname: Option, + pub(crate) options: Option, + pub(crate) application_name: Option, + pub(crate) ssl_mode: SslMode, + pub(crate) host: Vec, + pub(crate) port: Vec, + pub(crate) connect_timeout: Option, + pub(crate) keepalives: bool, + pub(crate) keepalives_idle: Duration, + pub(crate) target_session_attrs: TargetSessionAttrs, +} + +impl Default for Config { + fn default() -> Config { + Config::new() + } +} + +impl Config { + /// Creates a new configuration. + pub fn new() -> Config { + Config { + user: None, + password: None, + dbname: None, + options: None, + application_name: None, + ssl_mode: SslMode::Prefer, + host: vec![], + port: vec![], + connect_timeout: None, + keepalives: true, + keepalives_idle: Duration::from_secs(2 * 60 * 60), + target_session_attrs: TargetSessionAttrs::Any, + } + } + + /// Sets the user to authenticate with. + /// + /// Required. + pub fn user(&mut self, user: &str) -> &mut Config { + self.user = Some(user.to_string()); + self + } + + /// Sets the password to authenticate with. + pub fn password(&mut self, password: T) -> &mut Config + where + T: AsRef<[u8]>, + { + self.password = Some(password.as_ref().to_vec()); + self + } + + /// Sets the name of the database to connect to. + /// + /// Defaults to the user. + pub fn dbname(&mut self, dbname: &str) -> &mut Config { + self.dbname = Some(dbname.to_string()); + self + } + + /// Sets command line options used to configure the server. + pub fn options(&mut self, options: &str) -> &mut Config { + self.options = Some(options.to_string()); + self + } + + /// Sets the value of the `application_name` runtime parameter. + pub fn application_name(&mut self, application_name: &str) -> &mut Config { + self.application_name = Some(application_name.to_string()); + self + } + + /// Sets the SSL configuration. + /// + /// Defaults to `prefer`. + pub fn ssl_mode(&mut self, ssl_mode: SslMode) -> &mut Config { + self.ssl_mode = ssl_mode; + self + } + + /// Adds a host to the configuration. + /// + /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix + /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. + pub fn host(&mut self, host: &str) -> &mut Config { + #[cfg(unix)] + { + if host.starts_with('/') { + return self.host_path(host); + } + } + + self.host.push(Host::Tcp(host.to_string())); + self + } + + /// Adds a Unix socket host to the configuration. + /// + /// Unlike `host`, this method allows non-UTF8 paths. + #[cfg(unix)] + pub fn host_path(&mut self, host: T) -> &mut Config + where + T: AsRef, + { + self.host.push(Host::Unix(host.as_ref().to_path_buf())); + self + } + + /// Adds a port to the configuration. + /// + /// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which + /// case the default of 5432 is used, a single port, in which it is used for all hosts, or the same number of ports + /// as hosts. 
+ pub fn port(&mut self, port: u16) -> &mut Config { + self.port.push(port); + self + } + + /// Sets the timeout applied to socket-level connection attempts. + /// + /// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each + /// host separately. Defaults to no limit. + pub fn connect_timeout(&mut self, connect_timeout: Duration) -> &mut Config { + self.connect_timeout = Some(connect_timeout); + self + } + + /// Controls the use of TCP keepalive. + /// + /// This is ignored for Unix domain socket connections. Defaults to `true`. + pub fn keepalives(&mut self, keepalives: bool) -> &mut Config { + self.keepalives = keepalives; + self + } + + /// Sets the amount of idle time before a keepalive packet is sent on the connection. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. + pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { + self.keepalives_idle = keepalives_idle; + self + } + + /// Sets the requirements of the session. + /// + /// This can be used to connect to the primary server in a clustered database rather than one of the read-only + /// secondary servers. Defaults to `Any`. + pub fn target_session_attrs( + &mut self, + target_session_attrs: TargetSessionAttrs, + ) -> &mut Config { + self.target_session_attrs = target_session_attrs; + self + } + + fn param(&mut self, key: &str, value: &str) -> Result<(), Error> { + match key { + "user" => { + self.user(&value); + } + "password" => { + self.password(value); + } + "dbname" => { + self.dbname(&value); + } + "options" => { + self.options(&value); + } + "application_name" => { + self.application_name(&value); + } + "sslmode" => { + let mode = match value { + "disable" => SslMode::Disable, + "prefer" => SslMode::Prefer, + "require" => SslMode::Require, + _ => return Err(Error::config_parse(Box::new(InvalidValue("sslmode")))), + }; + self.ssl_mode(mode); + } + "host" => { + for host in value.split(',') { + self.host(host); + } + } + "port" => { + for port in value.split(',') { + let port = if port.is_empty() { + 5432 + } else { + port.parse() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("port"))))? + }; + self.port(port); + } + } + "connect_timeout" => { + let timeout = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("connect_timeout"))))?; + if timeout > 0 { + self.connect_timeout(Duration::from_secs(timeout as u64)); + } + } + "keepalives" => { + let keepalives = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives"))))?; + self.keepalives(keepalives != 0); + } + "keepalives_idle" => { + let keepalives_idle = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives_idle"))))?; + if keepalives_idle > 0 { + self.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); + } + } + "target_session_attrs" => { + let target_session_attrs = match &*value { + "any" => TargetSessionAttrs::Any, + "read-write" => TargetSessionAttrs::ReadWrite, + _ => { + return Err(Error::config_parse(Box::new(InvalidValue( + "target_session_attrs", + )))); + } + }; + self.target_session_attrs(target_session_attrs); + } + key => { + return Err(Error::config_parse(Box::new(UnknownOption( + key.to_string(), + )))); + } + } + + Ok(()) + } + + /// Connects to a PostgreSQL database over an arbitrary stream. 
+ /// + /// All of the settings other than `user`, `password`, `dbname`, `options`, and `application` name are ignored. + pub async fn connect_raw( + &self, + stream: S, + tls: T, + ) -> Result<(Client, Connection), Error> + where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsConnect, + { + connect_raw(stream, tls, self, None).await + } +} + +impl FromStr for Config { + type Err = Error; + + fn from_str(s: &str) -> Result { + match UrlParser::parse(s)? { + Some(config) => Ok(config), + None => Parser::parse(s), + } + } +} + +// Omit password from debug output +impl fmt::Debug for Config { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + struct Redaction {} + impl fmt::Debug for Redaction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "_") + } + } + + f.debug_struct("Config") + .field("user", &self.user) + .field("password", &self.password.as_ref().map(|_| Redaction {})) + .field("dbname", &self.dbname) + .field("options", &self.options) + .field("application_name", &self.application_name) + .field("ssl_mode", &self.ssl_mode) + .field("host", &self.host) + .field("port", &self.port) + .field("connect_timeout", &self.connect_timeout) + .field("keepalives", &self.keepalives) + .field("keepalives_idle", &self.keepalives_idle) + .field("target_session_attrs", &self.target_session_attrs) + .finish() + } +} + +#[derive(Debug)] +struct UnknownOption(String); + +impl fmt::Display for UnknownOption { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "unknown option `{}`", self.0) + } +} + +impl error::Error for UnknownOption {} + +#[derive(Debug)] +struct InvalidValue(&'static str); + +impl fmt::Display for InvalidValue { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "invalid value for option `{}`", self.0) + } +} + +impl error::Error for InvalidValue {} + +struct Parser<'a> { + s: &'a str, + it: iter::Peekable>, +} + +impl<'a> Parser<'a> { + fn parse(s: &'a str) -> Result { + let mut parser = Parser { + s, + it: s.char_indices().peekable(), + }; + + let mut config = Config::new(); + + while let Some((key, value)) = parser.parameter()? { + config.param(key, &value)?; + } + + Ok(config) + } + + fn skip_ws(&mut self) { + self.take_while(char::is_whitespace); + } + + fn take_while(&mut self, f: F) -> &'a str + where + F: Fn(char) -> bool, + { + let start = match self.it.peek() { + Some(&(i, _)) => i, + None => return "", + }; + + loop { + match self.it.peek() { + Some(&(_, c)) if f(c) => { + self.it.next(); + } + Some(&(i, _)) => return &self.s[start..i], + None => return &self.s[start..], + } + } + } + + fn eat(&mut self, target: char) -> Result<(), Error> { + match self.it.next() { + Some((_, c)) if c == target => Ok(()), + Some((i, c)) => { + let m = format!( + "unexpected character at byte {}: expected `{}` but got `{}`", + i, target, c + ); + Err(Error::config_parse(m.into())) + } + None => Err(Error::config_parse("unexpected EOF".into())), + } + } + + fn eat_if(&mut self, target: char) -> bool { + match self.it.peek() { + Some(&(_, c)) if c == target => { + self.it.next(); + true + } + _ => false, + } + } + + fn keyword(&mut self) -> Option<&'a str> { + let s = self.take_while(|c| match c { + c if c.is_whitespace() => false, + '=' => false, + _ => true, + }); + + if s.is_empty() { + None + } else { + Some(s) + } + } + + fn value(&mut self) -> Result { + let value = if self.eat_if('\'') { + let value = self.quoted_value()?; + self.eat('\'')?; + value + } else { + self.simple_value()? 
+ }; + + Ok(value) + } + + fn simple_value(&mut self) -> Result { + let mut value = String::new(); + + while let Some(&(_, c)) = self.it.peek() { + if c.is_whitespace() { + break; + } + + self.it.next(); + if c == '\\' { + if let Some((_, c2)) = self.it.next() { + value.push(c2); + } + } else { + value.push(c); + } + } + + if value.is_empty() { + return Err(Error::config_parse("unexpected EOF".into())); + } + + Ok(value) + } + + fn quoted_value(&mut self) -> Result { + let mut value = String::new(); + + while let Some(&(_, c)) = self.it.peek() { + if c == '\'' { + return Ok(value); + } + + self.it.next(); + if c == '\\' { + if let Some((_, c2)) = self.it.next() { + value.push(c2); + } + } else { + value.push(c); + } + } + + Err(Error::config_parse( + "unterminated quoted connection parameter value".into(), + )) + } + + fn parameter(&mut self) -> Result, Error> { + self.skip_ws(); + let keyword = match self.keyword() { + Some(keyword) => keyword, + None => return Ok(None), + }; + self.skip_ws(); + self.eat('=')?; + self.skip_ws(); + let value = self.value()?; + + Ok(Some((keyword, value))) + } +} + +// This is a pretty sloppy "URL" parser, but it matches the behavior of libpq, where things really aren't very strict +struct UrlParser<'a> { + s: &'a str, + config: Config, +} + +impl<'a> UrlParser<'a> { + fn parse(s: &'a str) -> Result, Error> { + let s = match Self::remove_url_prefix(s) { + Some(s) => s, + None => return Ok(None), + }; + + let mut parser = UrlParser { + s, + config: Config::new(), + }; + + parser.parse_credentials()?; + parser.parse_host()?; + parser.parse_path()?; + parser.parse_params()?; + + Ok(Some(parser.config)) + } + + fn remove_url_prefix(s: &str) -> Option<&str> { + for prefix in &["postgres://", "postgresql://"] { + if s.starts_with(prefix) { + return Some(&s[prefix.len()..]); + } + } + + None + } + + fn take_until(&mut self, end: &[char]) -> Option<&'a str> { + match self.s.find(end) { + Some(pos) => { + let (head, tail) = self.s.split_at(pos); + self.s = tail; + Some(head) + } + None => None, + } + } + + fn take_all(&mut self) -> &'a str { + mem::replace(&mut self.s, "") + } + + fn eat_byte(&mut self) { + self.s = &self.s[1..]; + } + + fn parse_credentials(&mut self) -> Result<(), Error> { + let creds = match self.take_until(&['@']) { + Some(creds) => creds, + None => return Ok(()), + }; + self.eat_byte(); + + let mut it = creds.splitn(2, ':'); + let user = self.decode(it.next().unwrap())?; + self.config.user(&user); + + if let Some(password) = it.next() { + let password = Cow::from(percent_encoding::percent_decode(password.as_bytes())); + self.config.password(password); + } + + Ok(()) + } + + fn parse_host(&mut self) -> Result<(), Error> { + let host = match self.take_until(&['/', '?']) { + Some(host) => host, + None => self.take_all(), + }; + + if host.is_empty() { + return Ok(()); + } + + for chunk in host.split(',') { + let (host, port) = if chunk.starts_with('[') { + let idx = match chunk.find(']') { + Some(idx) => idx, + None => return Err(Error::config_parse(InvalidValue("host").into())), + }; + + let host = &chunk[1..idx]; + let remaining = &chunk[idx + 1..]; + let port = if remaining.starts_with(':') { + Some(&remaining[1..]) + } else if remaining.is_empty() { + None + } else { + return Err(Error::config_parse(InvalidValue("host").into())); + }; + + (host, port) + } else { + let mut it = chunk.splitn(2, ':'); + (it.next().unwrap(), it.next()) + }; + + self.host_param(host)?; + let port = self.decode(port.unwrap_or("5432"))?; + self.config.param("port", 
&port)?; + } + + Ok(()) + } + + fn parse_path(&mut self) -> Result<(), Error> { + if !self.s.starts_with('/') { + return Ok(()); + } + self.eat_byte(); + + let dbname = match self.take_until(&['?']) { + Some(dbname) => dbname, + None => self.take_all(), + }; + + if !dbname.is_empty() { + self.config.dbname(&self.decode(dbname)?); + } + + Ok(()) + } + + fn parse_params(&mut self) -> Result<(), Error> { + if !self.s.starts_with('?') { + return Ok(()); + } + self.eat_byte(); + + while !self.s.is_empty() { + let key = match self.take_until(&['=']) { + Some(key) => self.decode(key)?, + None => return Err(Error::config_parse("unterminated parameter".into())), + }; + self.eat_byte(); + + let value = match self.take_until(&['&']) { + Some(value) => { + self.eat_byte(); + value + } + None => self.take_all(), + }; + + if key == "host" { + self.host_param(value)?; + } else { + let value = self.decode(value)?; + self.config.param(&key, &value)?; + } + } + + Ok(()) + } + + #[cfg(unix)] + fn host_param(&mut self, s: &str) -> Result<(), Error> { + let decoded = Cow::from(percent_encoding::percent_decode(s.as_bytes())); + if decoded.get(0) == Some(&b'/') { + self.config.host_path(OsStr::from_bytes(&decoded)); + } else { + let decoded = str::from_utf8(&decoded).map_err(|e| Error::config_parse(Box::new(e)))?; + self.config.host(decoded); + } + + Ok(()) + } + + #[cfg(not(unix))] + fn host_param(&mut self, s: &str) -> Result<(), Error> { + let s = self.decode(s)?; + self.config.param("host", &s) + } + + fn decode(&self, s: &'a str) -> Result, Error> { + percent_encoding::percent_decode(s.as_bytes()) + .decode_utf8() + .map_err(|e| Error::config_parse(e.into())) + } +} diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/tokio-postgres/src/connect.rs @@ -0,0 +1 @@ + diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs new file mode 100644 index 000000000..498658e19 --- /dev/null +++ b/tokio-postgres/src/connect_raw.rs @@ -0,0 +1,323 @@ +use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; +use crate::config::Config; +use crate::connect_tls::connect_tls; +use crate::maybe_tls_stream::MaybeTlsStream; +use crate::tls::{ChannelBinding, TlsConnect}; +use crate::{Client, Connection, Error}; +use fallible_iterator::FallibleIterator; +use futures::channel::mpsc; +use futures::{ready, Sink, SinkExt, Stream, TryStreamExt}; +use postgres_protocol::authentication; +use postgres_protocol::authentication::sasl; +use postgres_protocol::authentication::sasl::ScramSha256; +use postgres_protocol::message::backend::{AuthenticationSaslBody, Message}; +use postgres_protocol::message::frontend; +use std::collections::HashMap; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::codec::Framed; +use tokio::io::{AsyncRead, AsyncWrite}; + +pub struct StartupStream { + inner: Framed, PostgresCodec>, + buf: BackendMessages, +} + +impl Sink for StartupStream +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + type Error = io::Error; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_ready(cx) + } + + fn start_send(mut self: Pin<&mut Self>, item: FrontendMessage) -> io::Result<()> { + Pin::new(&mut self.inner).start_send(item) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_close(mut 
self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_close(cx) + } +} + +impl Stream for StartupStream +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + type Item = io::Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + loop { + match self.buf.next() { + Ok(Some(message)) => return Poll::Ready(Some(Ok(message))), + Ok(None) => {} + Err(e) => return Poll::Ready(Some(Err(e))), + } + + match ready!(Pin::new(&mut self.inner).poll_next(cx)) { + Some(Ok(BackendMessage::Normal { messages, .. })) => self.buf = messages, + Some(Ok(BackendMessage::Async(message))) => return Poll::Ready(Some(Ok(message))), + Some(Err(e)) => return Poll::Ready(Some(Err(e))), + None => return Poll::Ready(None), + } + } + } +} + +pub async fn connect_raw( + stream: S, + tls: T, + config: &Config, + idx: Option, +) -> Result<(Client, Connection), Error> +where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsConnect, +{ + let (stream, channel_binding) = connect_tls(stream, config.ssl_mode, tls).await?; + + let mut stream = StartupStream { + inner: Framed::new(stream, PostgresCodec), + buf: BackendMessages::empty(), + }; + + startup(&mut stream, config).await?; + authenticate(&mut stream, channel_binding, config).await?; + let (process_id, secret_key, parameters) = read_info(&mut stream).await?; + + let (sender, receiver) = mpsc::unbounded(); + let client = Client::new(sender, process_id, secret_key); + let connection = Connection::new(stream.inner, parameters, receiver); + + Ok((client, connection)) +} + +async fn startup(stream: &mut StartupStream, config: &Config) -> Result<(), Error> +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + let mut params = vec![("client_encoding", "UTF8"), ("timezone", "GMT")]; + if let Some(user) = &config.user { + params.push(("user", &**user)); + } + if let Some(dbname) = &config.dbname { + params.push(("database", &**dbname)); + } + if let Some(options) = &config.options { + params.push(("options", &**options)); + } + if let Some(application_name) = &config.application_name { + params.push(("application_name", &**application_name)); + } + + let mut buf = vec![]; + frontend::startup_message(params, &mut buf).map_err(Error::encode)?; + + stream + .send(FrontendMessage::Raw(buf)) + .await + .map_err(Error::io) +} + +async fn authenticate( + stream: &mut StartupStream, + channel_binding: ChannelBinding, + config: &Config, +) -> Result<(), Error> +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + match stream.try_next().await.map_err(Error::io)? 
{ + Some(Message::AuthenticationOk) => return Ok(()), + Some(Message::AuthenticationCleartextPassword) => { + let pass = config + .password + .as_ref() + .ok_or_else(|| Error::config("password missing".into()))?; + + authenticate_password(stream, pass).await?; + } + Some(Message::AuthenticationMd5Password(body)) => { + let user = config + .user + .as_ref() + .ok_or_else(|| Error::config("user missing".into()))?; + let pass = config + .password + .as_ref() + .ok_or_else(|| Error::config("password missing".into()))?; + + let output = authentication::md5_hash(user.as_bytes(), pass, body.salt()); + authenticate_password(stream, output.as_bytes()).await?; + } + Some(Message::AuthenticationSasl(body)) => { + let pass = config + .password + .as_ref() + .ok_or_else(|| Error::config("password missing".into()))?; + + authenticate_sasl(stream, body, channel_binding, pass).await?; + } + Some(Message::AuthenticationKerberosV5) + | Some(Message::AuthenticationScmCredential) + | Some(Message::AuthenticationGss) + | Some(Message::AuthenticationSspi) => { + return Err(Error::authentication( + "unsupported authentication method".into(), + )) + } + Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), + Some(_) => return Err(Error::unexpected_message()), + None => return Err(Error::closed()), + } + + match stream.try_next().await.map_err(Error::io)? { + Some(Message::AuthenticationOk) => Ok(()), + Some(Message::ErrorResponse(body)) => Err(Error::db(body)), + Some(_) => Err(Error::unexpected_message()), + None => Err(Error::closed()), + } +} + +async fn authenticate_password( + stream: &mut StartupStream, + password: &[u8], +) -> Result<(), Error> +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + let mut buf = vec![]; + frontend::password_message(password, &mut buf).map_err(Error::encode)?; + + stream + .send(FrontendMessage::Raw(buf)) + .await + .map_err(Error::io) +} + +async fn authenticate_sasl( + stream: &mut StartupStream, + body: AuthenticationSaslBody, + channel_binding: ChannelBinding, + password: &[u8], +) -> Result<(), Error> +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + let mut has_scram = false; + let mut has_scram_plus = false; + let mut mechanisms = body.mechanisms(); + while let Some(mechanism) = mechanisms.next().map_err(Error::parse)? { + match mechanism { + sasl::SCRAM_SHA_256 => has_scram = true, + sasl::SCRAM_SHA_256_PLUS => has_scram_plus = true, + _ => {} + } + } + + let channel_binding = channel_binding + .tls_server_end_point + .map(sasl::ChannelBinding::tls_server_end_point); + + let (channel_binding, mechanism) = if has_scram_plus { + match channel_binding { + Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS), + None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), + } + } else if has_scram { + match channel_binding { + Some(_) => (sasl::ChannelBinding::unrequested(), sasl::SCRAM_SHA_256), + None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), + } + } else { + return Err(Error::authentication("unsupported SASL mechanism".into())); + }; + + let mut scram = ScramSha256::new(password, channel_binding); + + let mut buf = vec![]; + frontend::sasl_initial_response(mechanism, scram.message(), &mut buf).map_err(Error::encode)?; + stream + .send(FrontendMessage::Raw(buf)) + .await + .map_err(Error::io)?; + + let body = match stream.try_next().await.map_err(Error::io)? 
{ + Some(Message::AuthenticationSaslContinue(body)) => body, + Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), + Some(_) => return Err(Error::unexpected_message()), + None => return Err(Error::closed()), + }; + + scram + .update(body.data()) + .map_err(|e| Error::authentication(e.into()))?; + + let mut buf = vec![]; + frontend::sasl_response(scram.message(), &mut buf).map_err(Error::encode)?; + stream + .send(FrontendMessage::Raw(buf)) + .await + .map_err(Error::io)?; + + let body = match stream.try_next().await.map_err(Error::io)? { + Some(Message::AuthenticationSaslFinal(body)) => body, + Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), + Some(_) => return Err(Error::unexpected_message()), + None => return Err(Error::closed()), + }; + + scram + .finish(body.data()) + .map_err(|e| Error::authentication(e.into()))?; + + Ok(()) +} + +async fn read_info( + stream: &mut StartupStream, +) -> Result<(i32, i32, HashMap), Error> +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + let mut process_id = 0; + let mut secret_key = 0; + let mut parameters = HashMap::new(); + + loop { + match stream.try_next().await.map_err(Error::io)? { + Some(Message::BackendKeyData(body)) => { + process_id = body.process_id(); + secret_key = body.secret_key(); + } + Some(Message::ParameterStatus(body)) => { + parameters.insert( + body.name().map_err(Error::parse)?.to_string(), + body.value().map_err(Error::parse)?.to_string(), + ); + } + Some(Message::NoticeResponse(_)) => {} + Some(Message::ReadyForQuery(_)) => return Ok((process_id, secret_key, parameters)), + Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), + Some(_) => return Err(Error::unexpected_message()), + None => return Err(Error::closed()), + } + } +} diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs new file mode 100644 index 000000000..1bc35f584 --- /dev/null +++ b/tokio-postgres/src/connect_tls.rs @@ -0,0 +1,48 @@ +use crate::config::SslMode; +use crate::maybe_tls_stream::MaybeTlsStream; +use crate::tls::private::ForcePrivateApi; +use crate::tls::{ChannelBinding, TlsConnect}; +use crate::Error; +use postgres_protocol::message::frontend; +use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; + +pub async fn connect_tls( + mut stream: S, + mode: SslMode, + tls: T, +) -> Result<(MaybeTlsStream, ChannelBinding), Error> +where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsConnect, +{ + match mode { + SslMode::Disable => return Ok((MaybeTlsStream::Raw(stream), ChannelBinding::none())), + SslMode::Prefer if !tls.can_connect(ForcePrivateApi) => { + return Ok((MaybeTlsStream::Raw(stream), ChannelBinding::none())) + } + SslMode::Prefer | SslMode::Require => {} + SslMode::__NonExhaustive => unreachable!(), + } + + let mut buf = vec![]; + frontend::ssl_request(&mut buf); + stream.write_all(&buf).await.map_err(Error::io)?; + + let mut buf = [0]; + stream.read_exact(&mut buf).await.map_err(Error::io)?; + + if buf[0] != b'S' { + if SslMode::Require == mode { + return Err(Error::tls("server does not support TLS".into())); + } else { + return Ok((MaybeTlsStream::Raw(stream), ChannelBinding::none())); + } + } + + let (stream, channel_binding) = tls + .connect(stream) + .await + .map_err(|e| Error::tls(e.into()))?; + + Ok((MaybeTlsStream::Tls(stream), channel_binding)) +} diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs new file mode 100644 index 000000000..a65121474 --- /dev/null +++ 
b/tokio-postgres/src/connection.rs @@ -0,0 +1,34 @@ +use crate::codec::{BackendMessages, FrontendMessage, PostgresCodec}; +use crate::maybe_tls_stream::MaybeTlsStream; +use futures::channel::mpsc; +use std::collections::HashMap; +use tokio::codec::Framed; + +pub enum RequestMessages { + Single(FrontendMessage), +} + +pub struct Request { + pub messages: RequestMessages, + pub sender: mpsc::Sender, +} + +pub struct Connection { + stream: Framed, PostgresCodec>, + parameters: HashMap, + receiver: mpsc::UnboundedReceiver, +} + +impl Connection { + pub(crate) fn new( + stream: Framed, PostgresCodec>, + parameters: HashMap, + receiver: mpsc::UnboundedReceiver, + ) -> Connection { + Connection { + stream, + parameters, + receiver, + } + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 14c0df497..c1bba0744 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -112,5 +112,19 @@ #![warn(rust_2018_idioms, clippy::all, missing_docs)] #![feature(async_await)] +pub use client::Client; +pub use config::Config; +pub use connection::Connection; +pub use error::Error; + +mod client; +mod codec; +pub mod config; +mod connect; +mod connect_raw; +mod connect_tls; +mod connection; pub mod error; +mod maybe_tls_stream; +pub mod tls; pub mod types; diff --git a/tokio-postgres/src/maybe_tls_stream.rs b/tokio-postgres/src/maybe_tls_stream.rs new file mode 100644 index 000000000..825334e95 --- /dev/null +++ b/tokio-postgres/src/maybe_tls_stream.rs @@ -0,0 +1,97 @@ +use bytes::{Buf, BufMut}; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncRead, AsyncWrite}; + +pub enum MaybeTlsStream { + Raw(S), + Tls(T), +} + +impl AsyncRead for MaybeTlsStream +where + S: AsyncRead + Unpin, + T: AsyncRead + Unpin, +{ + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + match self { + MaybeTlsStream::Raw(s) => s.prepare_uninitialized_buffer(buf), + MaybeTlsStream::Tls(s) => s.prepare_uninitialized_buffer(buf), + } + } + + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + match &mut *self { + MaybeTlsStream::Raw(s) => Pin::new(s).poll_read(cx, buf), + MaybeTlsStream::Tls(s) => Pin::new(s).poll_read(cx, buf), + } + } + + fn poll_read_buf( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> + where + Self: Sized, + B: BufMut, + { + match &mut *self { + MaybeTlsStream::Raw(s) => Pin::new(s).poll_read_buf(cx, buf), + MaybeTlsStream::Tls(s) => Pin::new(s).poll_read_buf(cx, buf), + } + } +} + +impl AsyncWrite for MaybeTlsStream +where + S: AsyncWrite + Unpin, + T: AsyncWrite + Unpin, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + unsafe { + match &mut *self { + MaybeTlsStream::Raw(s) => Pin::new(s).poll_write(cx, buf), + MaybeTlsStream::Tls(s) => Pin::new(s).poll_write(cx, buf), + } + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match &mut *self { + MaybeTlsStream::Raw(s) => Pin::new(s).poll_flush(cx), + MaybeTlsStream::Tls(s) => Pin::new(s).poll_flush(cx), + } + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match &mut *self { + MaybeTlsStream::Raw(s) => Pin::new(s).poll_shutdown(cx), + MaybeTlsStream::Tls(s) => Pin::new(s).poll_shutdown(cx), + } + } + + fn poll_write_buf( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> + where + Self: Sized, + B: Buf, + { + match &mut *self { + 
MaybeTlsStream::Raw(s) => Pin::new(s).poll_write_buf(cx, buf), + MaybeTlsStream::Tls(s) => Pin::new(s).poll_write_buf(cx, buf), + } + } +} diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs new file mode 100644 index 000000000..993005218 --- /dev/null +++ b/tokio-postgres/src/tls.rs @@ -0,0 +1,144 @@ +//! TLS support. + +use std::error::Error; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::{fmt, io}; +use tokio::io::{AsyncRead, AsyncWrite}; + +pub(crate) mod private { + pub struct ForcePrivateApi; +} + +/// Channel binding information returned from a TLS handshake. +pub struct ChannelBinding { + pub(crate) tls_server_end_point: Option>, +} + +impl ChannelBinding { + /// Creates a `ChannelBinding` containing no information. + pub fn none() -> ChannelBinding { + ChannelBinding { + tls_server_end_point: None, + } + } + + /// Creates a `ChannelBinding` containing `tls-server-end-point` channel binding information. + pub fn tls_server_end_point(tls_server_end_point: Vec) -> ChannelBinding { + ChannelBinding { + tls_server_end_point: Some(tls_server_end_point), + } + } +} + +/// A constructor of `TlsConnect`ors. +/// +/// Requires the `runtime` Cargo feature (enabled by default). +#[cfg(feature = "runtime")] +pub trait MakeTlsConnect { + /// The stream type created by the `TlsConnect` implementation. + type Stream: AsyncRead + AsyncWrite + Unpin; + /// The `TlsConnect` implementation created by this type. + type TlsConnect: TlsConnect; + /// The error type retured by the `TlsConnect` implementation. + type Error: Into>; + + /// Creates a new `TlsConnect`or. + /// + /// The domain name is provided for certificate verification and SNI. + fn make_tls_connect(&mut self, domain: &str) -> Result; +} + +/// An asynchronous function wrapping a stream in a TLS session. +pub trait TlsConnect { + /// The stream returned by the future. + type Stream: AsyncRead + AsyncWrite + Unpin; + /// The error returned by the future. + type Error: Into>; + /// The future returned by the connector. + type Future: Future>; + + /// Returns a future performing a TLS handshake over the stream. + fn connect(self, stream: S) -> Self::Future; + + #[doc(hidden)] + fn can_connect(&self, _: private::ForcePrivateApi) -> bool { + true + } +} + +/// A `MakeTlsConnect` and `TlsConnect` implementation which simply returns an error. +/// +/// This can be used when `sslmode` is `none` or `prefer`. +pub struct NoTls; + +impl TlsConnect for NoTls { + type Stream = NoTlsStream; + type Error = NoTlsError; + type Future = NoTlsFuture; + + fn connect(self, _: S) -> NoTlsFuture { + NoTlsFuture(()) + } + + fn can_connect(&self, _: private::ForcePrivateApi) -> bool { + false + } +} + +/// The future returned by `NoTls`. +pub struct NoTlsFuture(()); + +impl Future for NoTlsFuture { + type Output = Result<(NoTlsStream, ChannelBinding), NoTlsError>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + Poll::Ready(Err(NoTlsError(()))) + } +} + +/// The TLS "stream" type produced by the `NoTls` connector. +/// +/// Since `NoTls` doesn't support TLS, this type is uninhabited. 
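Since the enum below has no variants at all, its trait impls can satisfy any signature with an empty match; a minimal standalone sketch of that pattern (not part of the patch, using a hypothetical `Never` type):

// Sketch only: the uninhabited-enum trick the NoTlsStream impls rely on.
enum Never {}

fn absurd(x: Never) -> u64 {
    // `Never` has no values, so a match with no arms is exhaustive and
    // type-checks against any return type; this body can never actually run.
    match x {}
}
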
+pub enum NoTlsStream {} + +impl AsyncRead for NoTlsStream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + match *self {} + } +} + +impl AsyncWrite for NoTlsStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + match *self {} + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match *self {} + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match *self {} + } +} + +/// The error returned by `NoTls`. +#[derive(Debug)] +pub struct NoTlsError(()); + +impl fmt::Display for NoTlsError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.write_str("no TLS implementation configured") + } +} + +impl Error for NoTlsError {} diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 06a6d7382..1403cbfa9 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,140 +1,99 @@ #![warn(rust_2018_idioms)] +#![feature(async_await)] -use futures::sync::mpsc; -use futures::{future, stream, try_ready}; -use log::debug; -use std::error::Error; -use std::fmt::Write; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::time::{Duration, Instant}; use tokio::net::TcpStream; -use tokio::prelude::*; -use tokio::runtime::current_thread::Runtime; -use tokio::timer::Delay; use tokio_postgres::error::SqlState; -use tokio_postgres::impls; -use tokio_postgres::tls::NoTlsStream; -use tokio_postgres::types::{Kind, Type}; -use tokio_postgres::{AsyncMessage, Client, Connection, NoTls, SimpleQueryMessage}; +use tokio_postgres::tls::{NoTls, NoTlsStream}; +use tokio_postgres::{Client, Config, Connection, Error}; mod parse; +/* #[cfg(feature = "runtime")] mod runtime; mod types; +*/ -fn connect( - s: &str, -) -> impl Future), Error = tokio_postgres::Error> -{ - let builder = s.parse::().unwrap(); - TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - .map_err(|e| panic!("{}", e)) - .and_then(move |s| builder.connect_raw(s, NoTls)) -} - -fn smoke_test(s: &str) { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect(s); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - let prepare = client.prepare("SELECT 1::INT4"); - let statement = runtime.block_on(prepare).unwrap(); - let select = client.query(&statement, &[]).collect().map(|rows| { - assert_eq!(rows.len(), 1); - assert_eq!(rows[0].get::<_, i32>(0), 1); - }); - runtime.block_on(select).unwrap(); - - drop(statement); - drop(client); - runtime.run().unwrap(); +async fn connect(s: &str) -> Result<(Client, Connection), Error> { + let socket = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + .await + .unwrap(); + let config = s.parse::().unwrap(); + config.connect_raw(socket, NoTls).await } -#[test] -fn plain_password_missing() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=pass_user dbname=postgres"); - runtime.block_on(handshake).err().unwrap(); +#[tokio::test] +async fn plain_password_missing() { + connect("user=pass_user dbname=postgres") + .await + .err() + .unwrap(); } -#[test] -fn plain_password_wrong() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=pass_user password=foo dbname=postgres"); - match 
runtime.block_on(handshake) { +#[tokio::test] +async fn plain_password_wrong() { + match connect("user=pass_user password=foo dbname=postgres").await { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} Err(e) => panic!("{}", e), } } -#[test] -fn plain_password_ok() { - smoke_test("user=pass_user password=password dbname=postgres"); +#[tokio::test] +async fn plain_password_ok() { + connect("user=pass_user password=password dbname=postgres") + .await + .unwrap(); } -#[test] -fn md5_password_missing() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=md5_user dbname=postgres"); - runtime.block_on(handshake).err().unwrap(); +#[tokio::test] +async fn md5_password_missing() { + connect("user=md5_user dbname=postgres") + .await + .err() + .unwrap(); } -#[test] -fn md5_password_wrong() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=md5_user password=foo dbname=postgres"); - match runtime.block_on(handshake) { +#[tokio::test] +async fn md5_password_wrong() { + match connect("user=md5_user password=foo dbname=postgres").await { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} Err(e) => panic!("{}", e), } } -#[test] -fn md5_password_ok() { - smoke_test("user=md5_user password=password dbname=postgres"); +#[tokio::test] +async fn md5_password_ok() { + connect("user=md5_user password=password dbname=postgres") + .await + .unwrap(); } -#[test] -fn scram_password_missing() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=scram_user dbname=postgres"); - runtime.block_on(handshake).err().unwrap(); +#[tokio::test] +async fn scram_password_missing() { + connect("user=scram_user dbname=postgres") + .await + .err() + .unwrap(); } -#[test] -fn scram_password_wrong() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=scram_user password=foo dbname=postgres"); - match runtime.block_on(handshake) { +#[tokio::test] +async fn scram_password_wrong() { + match connect("user=scram_user password=foo dbname=postgres").await { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} Err(e) => panic!("{}", e), } } -#[test] -fn scram_password_ok() { - smoke_test("user=scram_user password=password dbname=postgres"); +#[tokio::test] +async fn scram_password_ok() { + connect("user=scram_user password=password dbname=postgres") + .await + .unwrap(); } +/* #[test] fn pipelined_prepare() { let _ = env_logger::try_init(); @@ -927,3 +886,4 @@ fn poll_idle_new() { }; runtime.block_on(future).unwrap(); } +*/ From 32fe52490ebf103495ab149a725af71642ce2472 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 22 Jul 2019 20:17:29 -0700 Subject: [PATCH 189/819] Runtime connect --- tokio-postgres/src/config.rs | 17 ++++ tokio-postgres/src/connect.rs | 59 ++++++++++++++ tokio-postgres/src/connect_socket.rs | 74 +++++++++++++++++ tokio-postgres/src/lib.rs | 37 ++++++++- tokio-postgres/src/socket.rs | 115 +++++++++++++++++++++++++++ tokio-postgres/src/tls.rs | 13 ++- tokio-postgres/tests/test/main.rs | 2 +- tokio-postgres/tests/test/runtime.rs | 74 ++++++++--------- 8 files changed, 346 insertions(+), 45 deletions(-) create mode 100644 tokio-postgres/src/connect_socket.rs create mode 100644 tokio-postgres/src/socket.rs diff 
--git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index bac19e564..de120ea96 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1,7 +1,13 @@ //! Connection configuration. +#[cfg(feature = "runtime")] +use crate::connect::connect; use crate::connect_raw::connect_raw; +#[cfg(feature = "runtime")] +use crate::tls::MakeTlsConnect; use crate::tls::TlsConnect; +#[cfg(feature = "runtime")] +use crate::Socket; use crate::{Client, Connection, Error}; use std::borrow::Cow; #[cfg(unix)] @@ -367,6 +373,17 @@ impl Config { Ok(()) } + /// Opens a connection to a PostgreSQL database. + /// + /// Requires the `runtime` Cargo feature (enabled by default). + #[cfg(feature = "runtime")] + pub async fn connect(&self, tls: T) -> Result<(Client, Connection), Error> + where + T: MakeTlsConnect, + { + connect(tls, self).await + } + /// Connects to a PostgreSQL database over an arbitrary stream. /// /// All of the settings other than `user`, `password`, `dbname`, `options`, and `application` name are ignored. diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 8b1378917..8bb234d0d 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -1 +1,60 @@ +use crate::config::{Host, TargetSessionAttrs}; +use crate::connect_raw::connect_raw; +use crate::connect_socket::connect_socket; +use crate::tls::{MakeTlsConnect, TlsConnect}; +use crate::{Client, Config, Connection, Error, Socket}; +pub async fn connect( + mut tls: T, + config: &Config, +) -> Result<(Client, Connection), Error> +where + T: MakeTlsConnect, +{ + if config.host.is_empty() { + return Err(Error::config("host missing".into())); + } + + if config.port.len() > 1 && config.port.len() != config.host.len() { + return Err(Error::config("invalid number of ports".into())); + } + + let mut error = None; + for (i, host) in config.host.iter().enumerate() { + let hostname = match host { + Host::Tcp(host) => &**host, + // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter + #[cfg(unix)] + Host::Unix(_) => "", + }; + + let tls = tls + .make_tls_connect(hostname) + .map_err(|e| Error::tls(e.into()))?; + + match connect_once(i, tls, config).await { + Ok((client, connection)) => return Ok((client, connection)), + Err(e) => error = Some(e), + } + } + + return Err(error.unwrap()); +} + +async fn connect_once( + idx: usize, + tls: T, + config: &Config, +) -> Result<(Client, Connection), Error> +where + T: TlsConnect, +{ + let socket = connect_socket(idx, config).await?; + let (client, connection) = connect_raw(socket, tls, config, Some(idx)).await?; + + if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { + unimplemented!() + } + + Ok((client, connection)) +} diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs new file mode 100644 index 000000000..d88edbc9d --- /dev/null +++ b/tokio-postgres/src/connect_socket.rs @@ -0,0 +1,74 @@ +use crate::config::Host; +use crate::{Config, Error, Socket}; +use std::future::Future; +use std::io; +use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; +use tokio::net::TcpStream; +#[cfg(unix)] +use tokio::net::UnixStream; +use tokio::timer::Timeout; + +pub async fn connect_socket(idx: usize, config: &Config) -> Result { + let port = *config + .port + .get(idx) + .or_else(|| config.port.get(0)) + .unwrap_or(&5432); + + match &config.host[idx] { + Host::Tcp(host) => { + let addrs = match host.parse::() { + Ok(ip) => { + // avoid dealing with blocking DNS entirely if 
possible + vec![SocketAddr::new(ip, port)].into_iter() + } + Err(_) => { + // FIXME what do? + (&**host, port).to_socket_addrs().map_err(Error::connect)? + } + }; + + let mut error = None; + for addr in addrs { + let new_error = match connect_timeout(TcpStream::connect(&addr), config).await { + Ok(socket) => return Ok(Socket::new_tcp(socket)), + Err(e) => e, + }; + error = Some(new_error); + } + + let error = error.unwrap_or_else(|| { + Error::connect(io::Error::new( + io::ErrorKind::InvalidData, + "resolved 0 addresses", + )) + }); + Err(error) + } + #[cfg(unix)] + Host::Unix(path) => { + let socket = connect_timeout(UnixStream::connect(path), config).await?; + Ok(Socket::new_unix(socket)) + } + } +} + +async fn connect_timeout(connect: F, config: &Config) -> Result +where + F: Future>, +{ + match config.connect_timeout { + Some(connect_timeout) => match Timeout::new(connect, connect_timeout).await { + Ok(Ok(socket)) => Ok(socket), + Ok(Err(e)) => Err(Error::connect(e)), + Err(_) => Err(Error::connect(io::Error::new( + io::ErrorKind::TimedOut, + "connection timed out", + ))), + }, + None => match connect.await { + Ok(socket) => Ok(socket), + Err(e) => Err(Error::connect(e)), + }, + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index c1bba0744..ea4921eee 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -112,19 +112,48 @@ #![warn(rust_2018_idioms, clippy::all, missing_docs)] #![feature(async_await)] -pub use client::Client; -pub use config::Config; -pub use connection::Connection; -pub use error::Error; +pub use crate::client::Client; +pub use crate::config::Config; +pub use crate::connection::Connection; +pub use crate::error::Error; +#[cfg(feature = "runtime")] +pub use crate::socket::Socket; +#[cfg(feature = "runtime")] +use crate::tls::MakeTlsConnect; +pub use crate::tls::NoTls; mod client; mod codec; pub mod config; +#[cfg(feature = "runtime")] mod connect; mod connect_raw; +#[cfg(feature = "runtime")] +mod connect_socket; mod connect_tls; mod connection; pub mod error; mod maybe_tls_stream; +#[cfg(feature = "runtime")] +mod socket; pub mod tls; pub mod types; + +/// A convenience function which parses a connection string and connects to the database. +/// +/// See the documentation for [`Config`] for details on the connection string format. +/// +/// Requires the `runtime` Cargo feature (enabled by default). +/// +/// [`Config`]: ./Config.t.html +#[cfg(feature = "runtime")] +pub async fn connect( + config: &str, + tls: T, +) -> Result<(Client, Connection), Error> +where + T: MakeTlsConnect, +{ + let config = config.parse::()?; + config.connect(tls).await +} diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs new file mode 100644 index 000000000..74663cf67 --- /dev/null +++ b/tokio-postgres/src/socket.rs @@ -0,0 +1,115 @@ +use bytes::{Buf, BufMut}; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::TcpStream; +#[cfg(unix)] +use tokio::net::UnixStream; + +#[derive(Debug)] +enum Inner { + Tcp(TcpStream), + Unix(UnixStream), +} + +/// The standard stream type used by the crate. +/// +/// Requires the `runtime` Cargo feature (enabled by default). 
+#[derive(Debug)] +pub struct Socket(Inner); + +impl Socket { + pub(crate) fn new_tcp(stream: TcpStream) -> Socket { + Socket(Inner::Tcp(stream)) + } + + #[cfg(unix)] + pub(crate) fn new_unix(stream: UnixStream) -> Socket { + Socket(Inner::Unix(stream)) + } +} + +impl AsyncRead for Socket { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + match &self.0 { + Inner::Tcp(s) => s.prepare_uninitialized_buffer(buf), + #[cfg(unix)] + Inner::Unix(s) => s.prepare_uninitialized_buffer(buf), + } + } + + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + match &mut self.0 { + Inner::Tcp(s) => Pin::new(s).poll_read(cx, buf), + #[cfg(unix)] + Inner::Unix(s) => Pin::new(s).poll_read(cx, buf), + } + } + + fn poll_read_buf( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> + where + Self: Sized, + B: BufMut, + { + match &mut self.0 { + Inner::Tcp(s) => Pin::new(s).poll_read_buf(cx, buf), + #[cfg(unix)] + Inner::Unix(s) => Pin::new(s).poll_read_buf(cx, buf), + } + } +} + +impl AsyncWrite for Socket { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + match &mut self.0 { + Inner::Tcp(s) => Pin::new(s).poll_write(cx, buf), + #[cfg(unix)] + Inner::Unix(s) => Pin::new(s).poll_write(cx, buf), + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match &mut self.0 { + Inner::Tcp(s) => Pin::new(s).poll_flush(cx), + #[cfg(unix)] + Inner::Unix(s) => Pin::new(s).poll_flush(cx), + } + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match &mut self.0 { + Inner::Tcp(s) => Pin::new(s).poll_shutdown(cx), + #[cfg(unix)] + Inner::Unix(s) => Pin::new(s).poll_shutdown(cx), + } + } + + fn poll_write_buf( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> + where + Self: Sized, + B: Buf, + { + match &mut self.0 { + Inner::Tcp(s) => Pin::new(s).poll_write_buf(cx, buf), + #[cfg(unix)] + Inner::Unix(s) => Pin::new(s).poll_write_buf(cx, buf), + } + } +} diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 993005218..1e1adeb23 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -41,7 +41,7 @@ pub trait MakeTlsConnect { type Stream: AsyncRead + AsyncWrite + Unpin; /// The `TlsConnect` implementation created by this type. type TlsConnect: TlsConnect; - /// The error type retured by the `TlsConnect` implementation. + /// The error type returned by the `TlsConnect` implementation. type Error: Into>; /// Creates a new `TlsConnect`or. @@ -73,6 +73,17 @@ pub trait TlsConnect { /// This can be used when `sslmode` is `none` or `prefer`. 
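The runtime `connect` path added in this patch tries each host in the list until one succeeds; a minimal usage sketch (not part of the patch, with values taken from the runtime tests later in this patch):

// Sketch only: the unresolvable first host falls through to the second.
use tokio_postgres::NoTls;

async fn connect_with_fallback() -> Result<(), tokio_postgres::Error> {
    let (_client, _connection) = tokio_postgres::connect(
        "host=foobar.invalid,localhost port=5433 user=postgres",
        NoTls,
    )
    .await?;
    Ok(())
}
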
pub struct NoTls; +#[cfg(feature = "runtime")] +impl MakeTlsConnect for NoTls { + type Stream = NoTlsStream; + type TlsConnect = NoTls; + type Error = NoTlsError; + + fn make_tls_connect(&mut self, _: &str) -> Result { + Ok(NoTls) + } +} + impl TlsConnect for NoTls { type Stream = NoTlsStream; type Error = NoTlsError; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 1403cbfa9..9bddea872 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -7,9 +7,9 @@ use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::{Client, Config, Connection, Error}; mod parse; -/* #[cfg(feature = "runtime")] mod runtime; +/* mod types; */ diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 2af9a18d7..c18e5be48 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -5,70 +5,65 @@ use tokio::timer::Delay; use tokio_postgres::error::SqlState; use tokio_postgres::NoTls; -fn smoke_test(s: &str) { - let mut runtime = Runtime::new().unwrap(); - let connect = tokio_postgres::connect(s, NoTls); - let (mut client, connection) = runtime.block_on(connect).unwrap(); +async fn smoke_test(s: &str) { + let (mut client, connection) = tokio_postgres::connect(s, NoTls).await.unwrap(); + /* let connection = connection.map_err(|e| panic!("{}", e)); runtime.spawn(connection); let execute = client.simple_query("SELECT 1").for_each(|_| Ok(())); runtime.block_on(execute).unwrap(); + */ } -#[test] +#[tokio::test] #[ignore] // FIXME doesn't work with our docker-based tests :( -fn unix_socket() { - smoke_test("host=/var/run/postgresql port=5433 user=postgres"); +async fn unix_socket() { + smoke_test("host=/var/run/postgresql port=5433 user=postgres").await; } -#[test] -fn tcp() { - smoke_test("host=localhost port=5433 user=postgres") +#[tokio::test] +async fn tcp() { + smoke_test("host=localhost port=5433 user=postgres").await; } -#[test] -fn multiple_hosts_one_port() { - smoke_test("host=foobar.invalid,localhost port=5433 user=postgres"); +#[tokio::test] +async fn multiple_hosts_one_port() { + smoke_test("host=foobar.invalid,localhost port=5433 user=postgres").await; } -#[test] -fn multiple_hosts_multiple_ports() { - smoke_test("host=foobar.invalid,localhost port=5432,5433 user=postgres"); +#[tokio::test] +async fn multiple_hosts_multiple_ports() { + smoke_test("host=foobar.invalid,localhost port=5432,5433 user=postgres").await; } -#[test] -fn wrong_port_count() { - let mut runtime = Runtime::new().unwrap(); - let f = tokio_postgres::connect("host=localhost port=5433,5433 user=postgres", NoTls); - runtime.block_on(f).err().unwrap(); - - let f = tokio_postgres::connect( - "host=localhost,localhost,localhost port=5433,5433 user=postgres", - NoTls, - ); - runtime.block_on(f).err().unwrap(); +#[tokio::test] +async fn wrong_port_count() { + tokio_postgres::connect("host=localhost port=5433,5433 user=postgres", NoTls) + .await + .err() + .unwrap(); } -#[test] -fn target_session_attrs_ok() { - let mut runtime = Runtime::new().unwrap(); - let f = tokio_postgres::connect( +/* +#[tokio::test] +async fn target_session_attrs_ok() { + tokio_postgres::connect( "host=localhost port=5433 user=postgres target_session_attrs=read-write", NoTls, - ); - runtime.block_on(f).unwrap(); + ) + .await + .err() + .unwrap(); } -#[test] -fn target_session_attrs_err() { - let mut runtime = Runtime::new().unwrap(); - let f = tokio_postgres::connect( +#[tokio::test] +async fn target_session_attrs_err() 
{ + tokio_postgres::connect( "host=localhost port=5433 user=postgres target_session_attrs=read-write options='-c default_transaction_read_only=on'", NoTls, - ); - runtime.block_on(f).err().unwrap(); + ).await.err().unwrap(); } #[test] @@ -100,3 +95,4 @@ fn cancel_query() { let ((), ()) = runtime.block_on(sleep.join(cancel)).unwrap(); } +*/ From 2480fefd2c47026468864a5e68d168347c0b6c7b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 22 Jul 2019 21:27:21 -0700 Subject: [PATCH 190/819] Connection IO logic --- tokio-postgres/src/connection.rs | 279 ++++++++++++++++++++++++++- tokio-postgres/src/lib.rs | 41 ++++ tokio-postgres/tests/test/main.rs | 34 ++-- tokio-postgres/tests/test/runtime.rs | 8 +- 4 files changed, 339 insertions(+), 23 deletions(-) diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index a65121474..4bd83c9b6 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -1,8 +1,20 @@ -use crate::codec::{BackendMessages, FrontendMessage, PostgresCodec}; +use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; +use crate::error::DbError; use crate::maybe_tls_stream::MaybeTlsStream; +use crate::{AsyncMessage, Error, Notification}; +use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use std::collections::HashMap; +use futures::{ready, Sink, Stream, StreamExt}; +use log::trace; +use postgres_protocol::message::backend::Message; +use postgres_protocol::message::frontend; +use std::collections::{HashMap, VecDeque}; +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; use tokio::codec::Framed; +use tokio::io::{AsyncRead, AsyncWrite}; pub enum RequestMessages { Single(FrontendMessage), @@ -13,13 +25,40 @@ pub struct Request { pub sender: mpsc::Sender, } +pub struct Response { + sender: mpsc::Sender, +} + +#[derive(PartialEq, Debug)] +enum State { + Active, + Terminating, + Closing, +} + +/// A connection to a PostgreSQL database. +/// +/// This is one half of what is returned when a new connection is established. It performs the actual IO with the +/// server, and should generally be spawned off onto an executor to run in the background. +/// +/// `Connection` implements `Future`, and only resolves when the connection is closed, either because a fatal error has +/// occurred, or because its associated `Client` has dropped and all outstanding work has completed. +#[must_use = "futures do nothing unless polled"] pub struct Connection { stream: Framed, PostgresCodec>, parameters: HashMap, receiver: mpsc::UnboundedReceiver, + pending_request: Option, + pending_response: Option, + responses: VecDeque, + state: State, } -impl Connection { +impl Connection +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ pub(crate) fn new( stream: Framed, PostgresCodec>, parameters: HashMap, @@ -29,6 +68,240 @@ impl Connection { stream, parameters, receiver, + pending_request: None, + pending_response: None, + responses: VecDeque::new(), + state: State::Active, + } + } + + /// Returns the value of a runtime parameter for this connection. 
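The struct-level doc comment above spells out how a `Connection` is meant to be driven; a minimal sketch of that pattern (not part of the patch, mirroring the connect helper in this patch's tests and relying on the `Future` impl added further down):

// Sketch only: the connection future performs all socket IO and is spawned
// off so the client can be used independently; it resolves once the client
// is dropped and outstanding work has completed.
use futures::FutureExt;
use tokio_postgres::NoTls;

async fn run() -> Result<(), tokio_postgres::Error> {
    let (client, connection) =
        tokio_postgres::connect("host=localhost port=5433 user=postgres", NoTls).await?;
    tokio::spawn(connection.map(|r| r.unwrap()));
    // `client` can now issue requests while the spawned task handles the socket.
    let _ = client;
    Ok(())
}
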
+ pub fn parameter(&self, name: &str) -> Option<&str> { + self.parameters.get(name).map(|s| &**s) + } + + fn poll_response( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { + if let Some(message) = self.pending_response.take() { + trace!("retrying pending response"); + return Poll::Ready(Some(Ok(message))); + } + + Pin::new(&mut self.stream) + .poll_next(cx) + .map(|o| o.map(|r| r.map_err(Error::io))) + } + + fn poll_read(&mut self, cx: &mut Context<'_>) -> Result, Error> { + if self.state != State::Active { + trace!("poll_read: done"); + return Ok(None); + } + + loop { + let message = match self.poll_response(cx)? { + Poll::Ready(Some(message)) => message, + Poll::Ready(None) => return Err(Error::closed()), + Poll::Pending => { + trace!("poll_read: waiting on response"); + return Ok(None); + } + }; + + let (mut messages, request_complete) = match message { + BackendMessage::Async(Message::NoticeResponse(body)) => { + let error = DbError::parse(&mut body.fields()).map_err(Error::parse)?; + return Ok(Some(AsyncMessage::Notice(error))); + } + BackendMessage::Async(Message::NotificationResponse(body)) => { + let notification = Notification { + process_id: body.process_id(), + channel: body.channel().map_err(Error::parse)?.to_string(), + payload: body.message().map_err(Error::parse)?.to_string(), + }; + return Ok(Some(AsyncMessage::Notification(notification))); + } + BackendMessage::Async(Message::ParameterStatus(body)) => { + self.parameters.insert( + body.name().map_err(Error::parse)?.to_string(), + body.value().map_err(Error::parse)?.to_string(), + ); + continue; + } + BackendMessage::Async(_) => unreachable!(), + BackendMessage::Normal { + messages, + request_complete, + } => (messages, request_complete), + }; + + let mut response = match self.responses.pop_front() { + Some(response) => response, + None => match messages.next().map_err(Error::parse)? 
{ + Some(Message::ErrorResponse(error)) => return Err(Error::db(error)), + _ => return Err(Error::unexpected_message()), + }, + }; + + match response.sender.poll_ready(cx) { + Poll::Ready(Ok(())) => { + let _ = response.sender.start_send(messages); + if !request_complete { + self.responses.push_front(response); + } + } + Poll::Ready(Err(_)) => { + // we need to keep paging through the rest of the messages even if the receiver's hung up + if !request_complete { + self.responses.push_front(response); + } + } + Poll::Pending => { + self.responses.push_front(response); + self.pending_response = Some(BackendMessage::Normal { + messages, + request_complete, + }); + trace!("poll_read: waiting on sender"); + return Ok(None); + } + } + } + } + + fn poll_request(&mut self, cx: &mut Context<'_>) -> Poll> { + if let Some(messages) = self.pending_request.take() { + trace!("retrying pending request"); + return Poll::Ready(Some(messages)); + } + + match self.receiver.poll_next_unpin(cx) { + Poll::Ready(Some(request)) => { + trace!("polled new request"); + self.responses.push_back(Response { + sender: request.sender, + }); + Poll::Ready(Some(request.messages)) + } + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, } } + + fn poll_write(&mut self, cx: &mut Context<'_>) -> Result { + loop { + if self.state == State::Closing { + trace!("poll_write: done"); + return Ok(false); + } + + let request = match self.poll_request(cx) { + Poll::Ready(Some(request)) => request, + Poll::Ready(None) if self.responses.is_empty() && self.state == State::Active => { + trace!("poll_write: at eof, terminating"); + self.state = State::Terminating; + let mut request = vec![]; + frontend::terminate(&mut request); + RequestMessages::Single(FrontendMessage::Raw(request)) + } + Poll::Ready(None) => { + trace!( + "poll_write: at eof, pending responses {}", + self.responses.len() + ); + return Ok(true); + } + Poll::Pending => { + trace!("poll_write: waiting on request"); + return Ok(true); + } + }; + + if let Poll::Pending = Pin::new(&mut self.stream) + .poll_ready(cx) + .map_err(Error::io)? + { + trace!("poll_write: waiting on socket"); + self.pending_request = Some(request); + return Ok(false); + } + + match request { + RequestMessages::Single(request) => { + Pin::new(&mut self.stream) + .start_send(request) + .map_err(Error::io)?; + if self.state == State::Terminating { + trace!("poll_write: sent eof, closing"); + self.state = State::Closing; + } + } + } + } + } + + fn poll_flush(&mut self, cx: &mut Context<'_>) -> Result<(), Error> { + match Pin::new(&mut self.stream) + .poll_flush(cx) + .map_err(Error::io)? + { + Poll::Ready(()) => trace!("poll_flush: flushed"), + Poll::Pending => trace!("poll_flush: waiting on socket"), + } + Ok(()) + } + + fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { + if self.state != State::Closing { + return Poll::Pending; + } + + match Pin::new(&mut self.stream) + .poll_close(cx) + .map_err(Error::io)? 
+ { + Poll::Ready(()) => { + trace!("poll_shutdown: complete"); + Poll::Ready(Ok(())) + } + Poll::Pending => { + trace!("poll_shutdown: waiting on socket"); + Poll::Pending + } + } + } + + pub fn poll_message( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let message = self.poll_read(cx)?; + let want_flush = self.poll_write(cx)?; + if want_flush { + self.poll_flush(cx)?; + } + match message { + Some(message) => Poll::Ready(Some(Ok(message))), + None => match self.poll_shutdown(cx) { + Poll::Ready(Ok(())) => Poll::Ready(None), + Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))), + Poll::Pending => Poll::Pending, + }, + } + } +} + +impl Future for Connection +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + type Output = Result<(), Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + while let Some(_) = ready!(Pin::as_mut(&mut self).poll_message(cx)?) {} + Poll::Ready(Ok(())) + } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index ea4921eee..68b03e6d1 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -115,6 +115,7 @@ pub use crate::client::Client; pub use crate::config::Config; pub use crate::connection::Connection; +use crate::error::DbError; pub use crate::error::Error; #[cfg(feature = "runtime")] pub use crate::socket::Socket; @@ -157,3 +158,43 @@ where let config = config.parse::()?; config.connect(tls).await } + +/// An asynchronous notification. +#[derive(Clone, Debug)] +pub struct Notification { + process_id: i32, + channel: String, + payload: String, +} + +/// An asynchronous message from the server. +#[allow(clippy::large_enum_variant)] +pub enum AsyncMessage { + /// A notice. + /// + /// Notices use the same format as errors, but aren't "errors" per-se. + Notice(DbError), + /// A notification. + /// + /// Connections can subscribe to notifications with the `LISTEN` command. + Notification(Notification), + #[doc(hidden)] + __NonExhaustive, +} + +impl Notification { + /// The process ID of the notifying backend process. + pub fn process_id(&self) -> i32 { + self.process_id + } + + /// The name of the channel that the notify has been raised on. + pub fn channel(&self) -> &str { + &self.channel + } + + /// The "payload" string passed from the notifying process. 
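Asynchronous notices and notifications are surfaced through `poll_message` above rather than through the plain `Future` impl; a rough sketch of draining them (not part of the patch; the concrete stream types match the raw-connect tests):

// Sketch only: wraps poll_message in poll_fn and reacts to notifications.
use futures::future;
use std::pin::Pin;
use tokio::net::TcpStream;
use tokio_postgres::tls::NoTlsStream;
use tokio_postgres::{AsyncMessage, Connection, Error};

async fn drain(mut connection: Connection<TcpStream, NoTlsStream>) -> Result<(), Error> {
    loop {
        match future::poll_fn(|cx| Pin::new(&mut connection).poll_message(cx)).await {
            Some(Ok(AsyncMessage::Notification(n))) => {
                println!("notify on {}: {}", n.channel(), n.payload());
            }
            Some(Ok(_)) => {}
            Some(Err(e)) => return Err(e),
            None => return Ok(()),
        }
    }
}
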
+ pub fn payload(&self) -> &str { + &self.payload + } +} diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 9bddea872..4c928d523 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,6 +1,7 @@ #![warn(rust_2018_idioms)] #![feature(async_await)] +use futures::FutureExt; use tokio::net::TcpStream; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; @@ -13,7 +14,7 @@ mod runtime; mod types; */ -async fn connect(s: &str) -> Result<(Client, Connection), Error> { +async fn connect_raw(s: &str) -> Result<(Client, Connection), Error> { let socket = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .await .unwrap(); @@ -21,9 +22,16 @@ async fn connect(s: &str) -> Result<(Client, Connection) config.connect_raw(socket, NoTls).await } +async fn connect(s: &str) -> Client { + let (client, connection) = connect_raw(s).await.unwrap(); + let connection = connection.map(|r| r.unwrap()); + tokio::spawn(connection); + client +} + #[tokio::test] async fn plain_password_missing() { - connect("user=pass_user dbname=postgres") + connect_raw("user=pass_user dbname=postgres") .await .err() .unwrap(); @@ -31,7 +39,7 @@ async fn plain_password_missing() { #[tokio::test] async fn plain_password_wrong() { - match connect("user=pass_user password=foo dbname=postgres").await { + match connect_raw("user=pass_user password=foo dbname=postgres").await { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} Err(e) => panic!("{}", e), @@ -40,14 +48,12 @@ async fn plain_password_wrong() { #[tokio::test] async fn plain_password_ok() { - connect("user=pass_user password=password dbname=postgres") - .await - .unwrap(); + connect("user=pass_user password=password dbname=postgres").await; } #[tokio::test] async fn md5_password_missing() { - connect("user=md5_user dbname=postgres") + connect_raw("user=md5_user dbname=postgres") .await .err() .unwrap(); @@ -55,7 +61,7 @@ async fn md5_password_missing() { #[tokio::test] async fn md5_password_wrong() { - match connect("user=md5_user password=foo dbname=postgres").await { + match connect_raw("user=md5_user password=foo dbname=postgres").await { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} Err(e) => panic!("{}", e), @@ -64,14 +70,12 @@ async fn md5_password_wrong() { #[tokio::test] async fn md5_password_ok() { - connect("user=md5_user password=password dbname=postgres") - .await - .unwrap(); + connect("user=md5_user password=password dbname=postgres").await; } #[tokio::test] async fn scram_password_missing() { - connect("user=scram_user dbname=postgres") + connect_raw("user=scram_user dbname=postgres") .await .err() .unwrap(); @@ -79,7 +83,7 @@ async fn scram_password_missing() { #[tokio::test] async fn scram_password_wrong() { - match connect("user=scram_user password=foo dbname=postgres").await { + match connect_raw("user=scram_user password=foo dbname=postgres").await { Ok(_) => panic!("unexpected success"), Err(ref e) if e.code() == Some(&SqlState::INVALID_PASSWORD) => {} Err(e) => panic!("{}", e), @@ -88,9 +92,7 @@ async fn scram_password_wrong() { #[tokio::test] async fn scram_password_ok() { - connect("user=scram_user password=password dbname=postgres") - .await - .unwrap(); + connect("user=scram_user password=password dbname=postgres").await; } /* diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 
c18e5be48..b9939e51e 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,4 +1,4 @@ -use futures::{Future, Stream}; +use futures::{Future, FutureExt, Stream}; use std::time::{Duration, Instant}; use tokio::runtime::current_thread::Runtime; use tokio::timer::Delay; @@ -7,10 +7,10 @@ use tokio_postgres::NoTls; async fn smoke_test(s: &str) { let (mut client, connection) = tokio_postgres::connect(s, NoTls).await.unwrap(); - /* - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); + let connection = connection.map(|e| e.unwrap()); + tokio::spawn(connection); + /* let execute = client.simple_query("SELECT 1").for_each(|_| Ok(())); runtime.block_on(execute).unwrap(); */ From f9e46510baf9955ab136ca062332e24a3b03ef74 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 23 Jul 2019 19:54:22 -0700 Subject: [PATCH 191/819] Implement prepare --- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/client.rs | 77 +++++++++++++++++++++++++++++-- tokio-postgres/src/lib.rs | 3 ++ tokio-postgres/src/prepare.rs | 71 ++++++++++++++++++++++++++++ tokio-postgres/src/statement.rs | 59 +++++++++++++++++++++++ tokio-postgres/tests/test/main.rs | 19 +++++++- 6 files changed, 226 insertions(+), 5 deletions(-) create mode 100644 tokio-postgres/src/prepare.rs create mode 100644 tokio-postgres/src/statement.rs diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 7fbf32afc..405517b91 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -34,7 +34,7 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] antidote = "1.0" bytes = "0.4" fallible-iterator = "0.2" -futures-preview = "0.3.0-alpha.17" +futures-preview = { version = "0.3.0-alpha.17", features = ["nightly", "async-await"] } log = "0.4" percent-encoding = "1.0" phf = "0.7.23" diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 3bfd7e122..46fb60c4f 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,8 +1,60 @@ -use crate::connection::Request; +use crate::codec::BackendMessages; +use crate::connection::{Request, RequestMessages}; +use crate::prepare::prepare; +use crate::types::Type; +use crate::{Error, Statement}; +use fallible_iterator::FallibleIterator; use futures::channel::mpsc; +use futures::{Stream, StreamExt}; +use postgres_protocol::message::backend::Message; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; -pub struct Client { +pub struct Responses { + receiver: mpsc::Receiver, + cur: BackendMessages, +} + +impl Responses { + pub async fn next(&mut self) -> Result { + loop { + match self.cur.next().map_err(Error::parse)? 
{ + Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), + Some(message) => return Ok(message), + None => {} + } + + match self.receiver.next().await { + Some(messages) => self.cur = messages, + None => return Err(Error::closed()), + } + } + } +} + +pub struct InnerClient { sender: mpsc::UnboundedSender, +} + +impl InnerClient { + pub fn send(&self, messages: RequestMessages) -> Result { + let (sender, receiver) = mpsc::channel(1); + let request = Request { messages, sender }; + self.sender + .unbounded_send(request) + .map_err(|_| Error::closed())?; + + Ok(Responses { + receiver, + cur: BackendMessages::empty(), + }) + } +} + +pub struct Client { + inner: Arc, process_id: i32, secret_key: i32, } @@ -14,9 +66,28 @@ impl Client { secret_key: i32, ) -> Client { Client { - sender, + inner: Arc::new(InnerClient { sender }), process_id, secret_key, } } + + pub(crate) fn inner(&self) -> Arc { + self.inner.clone() + } + + pub fn prepare<'a>( + &mut self, + query: &'a str, + ) -> impl Future> + 'a { + self.prepare_typed(query, &[]) + } + + pub fn prepare_typed<'a>( + &mut self, + query: &'a str, + parameter_types: &'a [Type], + ) -> impl Future> + 'a { + prepare(self.inner(), query, parameter_types) + } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 68b03e6d1..e894b2e2c 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -122,6 +122,7 @@ pub use crate::socket::Socket; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; +pub use statement::{Column, Statement}; mod client; mod codec; @@ -135,8 +136,10 @@ mod connect_tls; mod connection; pub mod error; mod maybe_tls_stream; +mod prepare; #[cfg(feature = "runtime")] mod socket; +mod statement; pub mod tls; pub mod types; diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs new file mode 100644 index 000000000..9e243c638 --- /dev/null +++ b/tokio-postgres/src/prepare.rs @@ -0,0 +1,71 @@ +use crate::client::InnerClient; +use crate::codec::FrontendMessage; +use crate::connection::{Request, RequestMessages}; +use crate::types::{Oid, Type}; +use crate::{Column, Error, Statement}; +use fallible_iterator::FallibleIterator; +use futures::StreamExt; +use postgres_protocol::message::backend::Message; +use postgres_protocol::message::frontend; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; + +static NEXT_ID: AtomicUsize = AtomicUsize::new(0); + +pub async fn prepare( + client: Arc, + query: &str, + types: &[Type], +) -> Result { + let name = format!("s{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); + + let mut buf = vec![]; + frontend::parse(&name, query, types.iter().map(Type::oid), &mut buf).map_err(Error::encode)?; + frontend::describe(b'S', &name, &mut buf).map_err(Error::encode)?; + frontend::sync(&mut buf); + + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + match responses.next().await? { + Message::ParseComplete => {} + _ => return Err(Error::unexpected_message()), + } + + let parameter_description = match responses.next().await? { + Message::ParameterDescription(body) => body, + _ => return Err(Error::unexpected_message()), + }; + + let row_description = match responses.next().await? { + Message::RowDescription(body) => Some(body), + Message::NoData => None, + _ => return Err(Error::unexpected_message()), + }; + + let mut parameters = vec![]; + let mut it = parameter_description.parameters(); + while let Some(oid) = it.next().map_err(Error::parse)? 
{ + let type_ = get_type(&client, oid).await?; + parameters.push(type_); + } + + let mut columns = vec![]; + if let Some(row_description) = row_description { + let mut it = row_description.fields(); + while let Some(field) = it.next().map_err(Error::parse)? { + let type_ = get_type(&client, field.type_oid()).await?; + let column = Column::new(field.name().to_string(), type_); + columns.push(column); + } + } + + Ok(Statement::new(&client, name, parameters, columns)) +} + +async fn get_type(client: &InnerClient, oid: Oid) -> Result { + if let Some(type_) = Type::from_oid(oid) { + return Ok(type_); + } + + unimplemented!() +} diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs new file mode 100644 index 000000000..b18592eb0 --- /dev/null +++ b/tokio-postgres/src/statement.rs @@ -0,0 +1,59 @@ +use crate::client::InnerClient; +use crate::connection::Request; +use crate::types::Type; +use std::sync::{Arc, Weak}; + +pub struct Statement { + client: Weak, + name: String, + params: Vec, + columns: Vec, +} + +impl Statement { + pub(crate) fn new( + inner: &Arc, + name: String, + params: Vec, + columns: Vec, + ) -> Statement { + Statement { + client: Arc::downgrade(inner), + name, + params, + columns, + } + } + + /// Returns the expected types of the statement's parameters. + pub fn params(&self) -> &[Type] { + &self.params + } + + /// Returns information about the columns returned when the statement is queried. + pub fn columns(&self) -> &[Column] { + &self.columns + } +} + +#[derive(Debug)] +pub struct Column { + name: String, + type_: Type, +} + +impl Column { + pub(crate) fn new(name: String, type_: Type) -> Column { + Column { name, type_ } + } + + /// Returns the name of the column. + pub fn name(&self) -> &str { + &self.name + } + + /// Returns the type of the column. 
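A short usage sketch of the new prepare API (not part of the patch; it mirrors the pipelined test added below):

// Sketch only: both Parse/Describe/Sync requests go out before either
// response is read, so the two prepares are pipelined on one connection.
use futures::try_join;
use tokio_postgres::types::Type;
use tokio_postgres::{Client, Error};

async fn prepare_two(client: &mut Client) -> Result<(), Error> {
    let prepare1 = client.prepare("SELECT $1::TEXT");
    let prepare2 = client.prepare("SELECT $1::BIGINT");
    let (statement1, statement2) = try_join!(prepare1, prepare2)?;

    assert_eq!(statement1.params()[0], Type::TEXT);
    assert_eq!(statement2.params()[0], Type::INT8);
    Ok(())
}
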
+ pub fn type_(&self) -> &Type { + &self.type_ + } +} diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 4c928d523..a31bef4ad 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,11 +1,12 @@ #![warn(rust_2018_idioms)] #![feature(async_await)] -use futures::FutureExt; +use futures::{try_join, FutureExt}; use tokio::net::TcpStream; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::{Client, Config, Connection, Error}; +use tokio_postgres::types::Type; mod parse; #[cfg(feature = "runtime")] @@ -95,6 +96,22 @@ async fn scram_password_ok() { connect("user=scram_user password=password dbname=postgres").await; } +#[tokio::test] +async fn pipelined_prepare() { + let mut client = connect("user=postgres").await; + + let prepare1 = client.prepare("SELECT $1::TEXT"); + let prepare2 = client.prepare("SELECT $1::BIGINT"); + + let (statement1, statement2) = try_join!(prepare1, prepare2).unwrap(); + + assert_eq!(statement1.params()[0], Type::TEXT); + assert_eq!(statement1.columns()[0].type_(), &Type::TEXT); + + assert_eq!(statement2.params()[0], Type::INT8); + assert_eq!(statement2.columns()[0].type_(), &Type::INT8); +} + /* #[test] fn pipelined_prepare() { From 90eb58dc8e4b861dd3a996d85fa9967fb7146c69 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 23 Jul 2019 20:16:31 -0700 Subject: [PATCH 192/819] Some prep for custom type lookup --- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/client.rs | 54 +++++++++++++++++++++++++++++-- tokio-postgres/src/prepare.rs | 6 +++- tokio-postgres/tests/test/main.rs | 20 +----------- 4 files changed, 59 insertions(+), 23 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 405517b91..05d334353 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -31,11 +31,11 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] "with-uuid-0_7" = ["uuid-07"] [dependencies] -antidote = "1.0" bytes = "0.4" fallible-iterator = "0.2" futures-preview = { version = "0.3.0-alpha.17", features = ["nightly", "async-await"] } log = "0.4" +parking_lot = "0.9" percent-encoding = "1.0" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 46fb60c4f..783bd6b18 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,12 +1,14 @@ use crate::codec::BackendMessages; use crate::connection::{Request, RequestMessages}; use crate::prepare::prepare; -use crate::types::Type; +use crate::types::{Oid, Type}; use crate::{Error, Statement}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::{Stream, StreamExt}; +use parking_lot::Mutex; use postgres_protocol::message::backend::Message; +use std::collections::HashMap; use std::future::Future; use std::pin::Pin; use std::sync::Arc; @@ -34,8 +36,16 @@ impl Responses { } } +struct State { + has_typeinfo: bool, + has_typeinfo_composite: bool, + has_typeinfo_enum: bool, + types: HashMap, +} + pub struct InnerClient { sender: mpsc::UnboundedSender, + state: Mutex, } impl InnerClient { @@ -51,6 +61,38 @@ impl InnerClient { cur: BackendMessages::empty(), }) } + + pub fn has_typeinfo(&self) -> bool { + self.state.lock().has_typeinfo + } + + pub fn set_has_typeinfo(&self) { + self.state.lock().has_typeinfo = true; + } + + pub fn has_typeinfo_composite(&self) -> bool { + self.state.lock().has_typeinfo_composite + } + + 
pub fn set_has_typeinfo_composite(&self) { + self.state.lock().has_typeinfo_composite = true; + } + + pub fn has_typeinfo_enum(&self) -> bool { + self.state.lock().has_typeinfo_enum + } + + pub fn set_has_typeinfo_enum(&self) { + self.state.lock().has_typeinfo_enum = true; + } + + pub fn type_(&self, oid: Oid) -> Option { + self.state.lock().types.get(&oid).cloned() + } + + pub fn set_type(&self, oid: Oid, type_: Type) { + self.state.lock().types.insert(oid, type_); + } } pub struct Client { @@ -66,7 +108,15 @@ impl Client { secret_key: i32, ) -> Client { Client { - inner: Arc::new(InnerClient { sender }), + inner: Arc::new(InnerClient { + sender, + state: Mutex::new(State { + has_typeinfo: false, + has_typeinfo_composite: false, + has_typeinfo_enum: false, + types: HashMap::new(), + }), + }), process_id, secret_key, } diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 9e243c638..a26ff1a6f 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -62,10 +62,14 @@ pub async fn prepare( Ok(Statement::new(&client, name, parameters, columns)) } -async fn get_type(client: &InnerClient, oid: Oid) -> Result { +async fn get_type(client: &Arc, oid: Oid) -> Result { if let Some(type_) = Type::from_oid(oid) { return Ok(type_); } + if let Some(type_) = client.type_(oid) { + return Ok(type_); + } + unimplemented!() } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index a31bef4ad..baf0d63aa 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -5,8 +5,8 @@ use futures::{try_join, FutureExt}; use tokio::net::TcpStream; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; -use tokio_postgres::{Client, Config, Connection, Error}; use tokio_postgres::types::Type; +use tokio_postgres::{Client, Config, Connection, Error}; mod parse; #[cfg(feature = "runtime")] @@ -113,24 +113,6 @@ async fn pipelined_prepare() { } /* -#[test] -fn pipelined_prepare() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - let prepare1 = client.prepare("SELECT $1::HSTORE[]"); - let prepare2 = client.prepare("SELECT $1::HSTORE[]"); - let prepare = prepare1.join(prepare2); - runtime.block_on(prepare).unwrap(); - - drop(client); - runtime.run().unwrap(); -} - #[test] fn insert_select() { let _ = env_logger::try_init(); From 77caff91ef9213d6c8979beb0b88320030aaf7bf Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 24 Jul 2019 19:18:15 -0700 Subject: [PATCH 193/819] Add query/select --- tokio-postgres/src/client.rs | 61 +++++- tokio-postgres/src/connection.rs | 1 - tokio-postgres/src/lib.rs | 3 + tokio-postgres/src/maybe_tls_stream.rs | 8 +- tokio-postgres/src/prepare.rs | 3 +- tokio-postgres/src/query.rs | 136 +++++++++++++ tokio-postgres/src/row.rs | 240 +++++++++++++++++++++++ tokio-postgres/src/statement.rs | 32 ++- tokio-postgres/src/tls.rs | 16 +- tokio-postgres/src/types/mod.rs | 4 +- tokio-postgres/src/types/serde_json_1.rs | 2 +- tokio-postgres/tests/test/main.rs | 30 ++- 12 files changed, 500 insertions(+), 36 deletions(-) create mode 100644 tokio-postgres/src/query.rs create mode 100644 tokio-postgres/src/row.rs diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 783bd6b18..0f731ae48 100644 --- 
a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,16 +1,17 @@ use crate::codec::BackendMessages; use crate::connection::{Request, RequestMessages}; use crate::prepare::prepare; -use crate::types::{Oid, Type}; +use crate::query::{execute, query, Query}; +use crate::types::{Oid, ToSql, Type}; use crate::{Error, Statement}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::{Stream, StreamExt}; +use futures::future; +use futures::{ready, StreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; use std::collections::HashMap; use std::future::Future; -use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; @@ -20,20 +21,24 @@ pub struct Responses { } impl Responses { - pub async fn next(&mut self) -> Result { + pub fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll> { loop { match self.cur.next().map_err(Error::parse)? { - Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), - Some(message) => return Ok(message), + Some(Message::ErrorResponse(body)) => return Poll::Ready(Err(Error::db(body))), + Some(message) => return Poll::Ready(Ok(message)), None => {} } - match self.receiver.next().await { + match ready!(self.receiver.poll_next_unpin(cx)) { Some(messages) => self.cur = messages, - None => return Err(Error::closed()), + None => return Poll::Ready(Err(Error::closed())), } } } + + pub async fn next(&mut self) -> Result { + future::poll_fn(|cx| self.poll_next(cx)).await + } } struct State { @@ -140,4 +145,44 @@ impl Client { ) -> impl Future> + 'a { prepare(self.inner(), query, parameter_types) } + + pub fn query<'a>( + &mut self, + statement: &'a Statement, + params: &'a [&dyn ToSql], + ) -> impl Future> + 'a { + self.query_iter(statement, params.iter().cloned()) + } + + pub fn query_iter<'a, I>( + &mut self, + statement: &'a Statement, + params: I, + ) -> impl Future> + 'a + where + I: IntoIterator + 'a, + I::IntoIter: ExactSizeIterator, + { + query(self.inner(), statement, params) + } + + pub fn execute<'a>( + &mut self, + statement: &'a Statement, + params: &'a [&dyn ToSql], + ) -> impl Future> + 'a { + self.execute_iter(statement, params.iter().cloned()) + } + + pub fn execute_iter<'a, I>( + &mut self, + statement: &'a Statement, + params: I, + ) -> impl Future> + 'a + where + I: IntoIterator + 'a, + I::IntoIter: ExactSizeIterator, + { + execute(self.inner(), statement, params) + } } diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index 4bd83c9b6..cbbec2d1e 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -10,7 +10,6 @@ use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::collections::{HashMap, VecDeque}; use std::future::Future; -use std::io; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::codec::Framed; diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index e894b2e2c..3a9e298ac 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -117,6 +117,7 @@ pub use crate::config::Config; pub use crate::connection::Connection; use crate::error::DbError; pub use crate::error::Error; +pub use crate::row::{Row, SimpleQueryRow}; #[cfg(feature = "runtime")] pub use crate::socket::Socket; #[cfg(feature = "runtime")] @@ -137,6 +138,8 @@ mod connection; pub mod error; mod maybe_tls_stream; mod prepare; +mod query; +pub mod row; #[cfg(feature = "runtime")] mod socket; mod statement; diff --git 
a/tokio-postgres/src/maybe_tls_stream.rs b/tokio-postgres/src/maybe_tls_stream.rs index 825334e95..9928cef42 100644 --- a/tokio-postgres/src/maybe_tls_stream.rs +++ b/tokio-postgres/src/maybe_tls_stream.rs @@ -58,11 +58,9 @@ where cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - unsafe { - match &mut *self { - MaybeTlsStream::Raw(s) => Pin::new(s).poll_write(cx, buf), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_write(cx, buf), - } + match &mut *self { + MaybeTlsStream::Raw(s) => Pin::new(s).poll_write(cx, buf), + MaybeTlsStream::Tls(s) => Pin::new(s).poll_write(cx, buf), } } diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index a26ff1a6f..056ec3dcb 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -1,10 +1,9 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; -use crate::connection::{Request, RequestMessages}; +use crate::connection::RequestMessages; use crate::types::{Oid, Type}; use crate::{Column, Error, Statement}; use fallible_iterator::FallibleIterator; -use futures::StreamExt; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::sync::atomic::{AtomicUsize, Ordering}; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs new file mode 100644 index 000000000..4da69197a --- /dev/null +++ b/tokio-postgres/src/query.rs @@ -0,0 +1,136 @@ +use crate::client::{InnerClient, Responses}; +use crate::codec::FrontendMessage; +use crate::connection::RequestMessages; +use crate::types::{IsNull, ToSql}; +use crate::{Error, Row, Statement}; +use futures::{ready, Stream}; +use postgres_protocol::message::backend::Message; +use postgres_protocol::message::frontend; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +pub async fn query<'a, I>( + client: Arc, + statement: &Statement, + params: I, +) -> Result +where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + let responses = start(&client, &statement, params).await?; + + Ok(Query { + statement: statement.clone(), + responses, + }) +} + +pub async fn execute<'a, I>( + client: Arc, + statement: &Statement, + params: I, +) -> Result +where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + let mut responses = start(&client, &statement, params).await?; + + loop { + match responses.next().await? { + Message::DataRow(_) => {} + Message::CommandComplete(body) => { + let rows = body + .tag() + .map_err(Error::parse)? 
+ .rsplit(' ') + .next() + .unwrap() + .parse() + .unwrap_or(0); + return Ok(rows); + } + Message::EmptyQueryResponse => return Ok(0), + _ => return Err(Error::unexpected_message()), + } + } +} + +async fn start<'a, I>( + client: &Arc, + statement: &Statement, + params: I, +) -> Result +where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + let params = params.into_iter(); + + assert!( + statement.params().len() == params.len(), + "expected {} parameters but got {}", + statement.params().len(), + params.len() + ); + + let mut buf = vec![]; + + let mut error_idx = 0; + let r = frontend::bind( + "", + statement.name(), + Some(1), + params.zip(statement.params()).enumerate(), + |(idx, (param, ty)), buf| match param.to_sql_checked(ty, buf) { + Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), + Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), + Err(e) => { + error_idx = idx; + Err(e) + } + }, + Some(1), + &mut buf, + ); + match r { + Ok(()) => {} + Err(frontend::BindError::Conversion(e)) => return Err(Error::to_sql(e, error_idx)), + Err(frontend::BindError::Serialization(e)) => return Err(Error::encode(e)), + } + + frontend::execute("", 0, &mut buf).map_err(Error::encode)?; + frontend::sync(&mut buf); + + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + match responses.next().await? { + Message::BindComplete => {} + _ => return Err(Error::unexpected_message()), + } + + Ok(responses) +} + +pub struct Query { + statement: Statement, + responses: Responses, +} + +impl Stream for Query { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match ready!(self.responses.poll_next(cx)?) { + Message::DataRow(body) => { + Poll::Ready(Some(Ok(Row::new(self.statement.clone(), body)?))) + } + Message::EmptyQueryResponse | Message::CommandComplete(_) => Poll::Ready(None), + Message::ErrorResponse(body) => Poll::Ready(Some(Err(Error::db(body)))), + _ => Poll::Ready(Some(Err(Error::unexpected_message()))), + } + } +} diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs new file mode 100644 index 000000000..326733f84 --- /dev/null +++ b/tokio-postgres/src/row.rs @@ -0,0 +1,240 @@ +//! Rows. + +use crate::row::sealed::{AsName, Sealed}; +use crate::statement::Column; +use crate::types::{FromSql, Type, WrongType}; +use crate::{Error, Statement}; +use fallible_iterator::FallibleIterator; +use postgres_protocol::message::backend::DataRowBody; +use std::fmt; +use std::ops::Range; +use std::str; +use std::sync::Arc; + +mod sealed { + pub trait Sealed {} + + pub trait AsName { + fn as_name(&self) -> &str; + } +} + +impl AsName for Column { + fn as_name(&self) -> &str { + self.name() + } +} + +impl AsName for String { + fn as_name(&self) -> &str { + self + } +} + +/// A trait implemented by types that can index into columns of a row. +/// +/// This cannot be implemented outside of this crate. 
+pub trait RowIndex: Sealed { + #[doc(hidden)] + fn __idx(&self, columns: &[T]) -> Option + where + T: AsName; +} + +impl Sealed for usize {} + +impl RowIndex for usize { + #[inline] + fn __idx(&self, columns: &[T]) -> Option + where + T: AsName, + { + if *self >= columns.len() { + None + } else { + Some(*self) + } + } +} + +impl Sealed for str {} + +impl RowIndex for str { + #[inline] + fn __idx(&self, columns: &[T]) -> Option + where + T: AsName, + { + if let Some(idx) = columns.iter().position(|d| d.as_name() == self) { + return Some(idx); + }; + + // FIXME ASCII-only case insensitivity isn't really the right thing to + // do. Postgres itself uses a dubious wrapper around tolower and JDBC + // uses the US locale. + columns + .iter() + .position(|d| d.as_name().eq_ignore_ascii_case(self)) + } +} + +impl<'a, T> Sealed for &'a T where T: ?Sized + Sealed {} + +impl<'a, T> RowIndex for &'a T +where + T: ?Sized + RowIndex, +{ + #[inline] + fn __idx(&self, columns: &[U]) -> Option + where + U: AsName, + { + T::__idx(*self, columns) + } +} + +/// A row of data returned from the database by a query. +pub struct Row { + statement: Statement, + body: DataRowBody, + ranges: Vec>>, +} + +impl Row { + pub(crate) fn new(statement: Statement, body: DataRowBody) -> Result { + let ranges = body.ranges().collect().map_err(Error::parse)?; + Ok(Row { + statement, + body, + ranges, + }) + } + + /// Returns information about the columns of data in the row. + pub fn columns(&self) -> &[Column] { + self.statement.columns() + } + + /// Determines if the row contains no values. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of values in the row. + pub fn len(&self) -> usize { + self.columns().len() + } + + /// Deserializes a value from the row. + /// + /// The value can be specified either by its numeric index in the row, or by its column name. + /// + /// # Panics + /// + /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. + pub fn get<'a, I, T>(&'a self, idx: I) -> T + where + I: RowIndex + fmt::Display, + T: FromSql<'a>, + { + match self.get_inner(&idx) { + Ok(ok) => ok, + Err(err) => panic!("error retrieving column {}: {}", idx, err), + } + } + + /// Like `Row::get`, but returns a `Result` rather than panicking. + pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result + where + I: RowIndex, + T: FromSql<'a>, + { + self.get_inner(&idx) + } + + fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result + where + I: RowIndex, + T: FromSql<'a>, + { + let idx = match idx.__idx(self.columns()) { + Some(idx) => idx, + None => return Err(Error::column()), + }; + + let ty = self.columns()[idx].type_(); + if !T::accepts(ty) { + return Err(Error::from_sql(Box::new(WrongType::new(ty.clone())), idx)); + } + + let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); + FromSql::from_sql_nullable(ty, buf).map_err(|e| Error::from_sql(e, idx)) + } +} + +/// A row of data returned from the database by a simple query. +pub struct SimpleQueryRow { + columns: Arc<[String]>, + body: DataRowBody, + ranges: Vec>>, +} + +impl SimpleQueryRow { + #[allow(clippy::new_ret_no_self)] + pub(crate) fn new(columns: Arc<[String]>, body: DataRowBody) -> Result { + let ranges = body.ranges().collect().map_err(Error::parse)?; + Ok(SimpleQueryRow { + columns, + body, + ranges, + }) + } + + /// Determines if the row contains no values. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of values in the row. 
+ pub fn len(&self) -> usize { + self.columns.len() + } + + /// Returns a value from the row. + /// + /// The value can be specified either by its numeric index in the row, or by its column name. + /// + /// # Panics + /// + /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. + pub fn get(&self, idx: I) -> Option<&str> + where + I: RowIndex + fmt::Display, + { + match self.get_inner(&idx) { + Ok(ok) => ok, + Err(err) => panic!("error retrieving column {}: {}", idx, err), + } + } + + /// Like `SimpleQueryRow::get`, but returns a `Result` rather than panicking. + pub fn try_get(&self, idx: I) -> Result, Error> + where + I: RowIndex, + { + self.get_inner(&idx) + } + + fn get_inner(&self, idx: &I) -> Result, Error> + where + I: RowIndex, + { + let idx = match idx.__idx(&self.columns) { + Some(idx) => idx, + None => return Err(Error::column()), + }; + + let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); + FromSql::from_sql_nullable(&Type::TEXT, buf).map_err(|e| Error::from_sql(e, idx)) + } +} diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index b18592eb0..09a7274f2 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -1,15 +1,31 @@ use crate::client::InnerClient; -use crate::connection::Request; +use crate::codec::FrontendMessage; +use crate::connection::RequestMessages; use crate::types::Type; +use postgres_protocol::message::frontend; use std::sync::{Arc, Weak}; -pub struct Statement { +struct StatementInner { client: Weak, name: String, params: Vec, columns: Vec, } +impl Drop for StatementInner { + fn drop(&mut self) { + if let Some(client) = self.client.upgrade() { + let mut buf = vec![]; + frontend::close(b'S', &self.name, &mut buf).expect("statement name not valid"); + frontend::sync(&mut buf); + let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf))); + } + } +} + +#[derive(Clone)] +pub struct Statement(Arc); + impl Statement { pub(crate) fn new( inner: &Arc, @@ -17,22 +33,26 @@ impl Statement { params: Vec, columns: Vec, ) -> Statement { - Statement { + Statement(Arc::new(StatementInner { client: Arc::downgrade(inner), name, params, columns, - } + })) + } + + pub(crate) fn name(&self) -> &str { + &self.0.name } /// Returns the expected types of the statement's parameters. pub fn params(&self) -> &[Type] { - &self.params + &self.0.params } /// Returns information about the columns returned when the statement is queried. 
pub fn columns(&self) -> &[Column] { - &self.columns + &self.0.columns } } diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 1e1adeb23..109caac18 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -104,7 +104,7 @@ pub struct NoTlsFuture(()); impl Future for NoTlsFuture { type Output = Result<(NoTlsStream, ChannelBinding), NoTlsError>; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll { Poll::Ready(Err(NoTlsError(()))) } } @@ -117,27 +117,23 @@ pub enum NoTlsStream {} impl AsyncRead for NoTlsStream { fn poll_read( self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], + _: &mut Context<'_>, + _: &mut [u8], ) -> Poll> { match *self {} } } impl AsyncWrite for NoTlsStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { + fn poll_write(self: Pin<&mut Self>, _: &mut Context<'_>, _: &[u8]) -> Poll> { match *self {} } - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { match *self {} } - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { match *self {} } } diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 6dc2692a5..9cf5a1cd9 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -543,7 +543,7 @@ pub enum IsNull { /// /// `ToSql` is implemented for `Vec` and `&[T]` where `T` implements `ToSql`, /// and corresponds to one-dimensional Postgres arrays with an index offset of 1. -pub trait ToSql: fmt::Debug { +pub trait ToSql: fmt::Debug + Sync + Send { /// Converts the value of `self` into the binary format of the specified /// Postgres `Type`, appending it to `out`. 
/// @@ -744,7 +744,7 @@ simple_to!(f64, float8_to_sql, FLOAT8); impl ToSql for HashMap, H> where - H: BuildHasher, + H: BuildHasher + Sync + Send, { fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { types::hstore_to_sql( diff --git a/tokio-postgres/src/types/serde_json_1.rs b/tokio-postgres/src/types/serde_json_1.rs index b3b2e3c90..5ee098be9 100644 --- a/tokio-postgres/src/types/serde_json_1.rs +++ b/tokio-postgres/src/types/serde_json_1.rs @@ -33,7 +33,7 @@ where impl ToSql for Json where - T: Serialize + Debug, + T: Serialize + Debug + Sync + Send, { fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { if *ty == Type::JSONB { diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index baf0d63aa..411929e0f 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,7 +1,7 @@ #![warn(rust_2018_idioms)] #![feature(async_await)] -use futures::{try_join, FutureExt}; +use futures::{try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use tokio::net::TcpStream; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; @@ -112,6 +112,34 @@ async fn pipelined_prepare() { assert_eq!(statement2.columns()[0].type_(), &Type::INT8); } +#[tokio::test] +async fn insert_select() { + let mut client = connect("user=postgres").await; + + let setup = client + .prepare("CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT)") + .await + .unwrap(); + client.execute(&setup, &[]).await.unwrap(); + drop(setup); + + let insert = client.prepare("INSERT INTO foo (name) VALUES ($1), ($2)"); + let select = client.prepare("SELECT id, name FROM foo ORDER BY id"); + let (insert, select) = try_join!(insert, select).unwrap(); + + let insert = client.execute(&insert, &[&"alice", &"bob"]); + let select = client + .query(&select, &[]) + .and_then(|q| q.try_collect::>()); + let (_, rows) = try_join!(insert, select).unwrap(); + + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[0].get::<_, &str>(1), "alice"); + assert_eq!(rows[1].get::<_, i32>(0), 2); + assert_eq!(rows[1].get::<_, &str>(1), "bob"); +} + /* #[test] fn insert_select() { From 4396f38fcc6fe8179d0acf5acacbff44383003b3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 25 Jul 2019 07:21:53 -0700 Subject: [PATCH 194/819] Setup socket --- tokio-postgres/src/connect_socket.rs | 11 ++++++++- tokio-postgres/tests/test/main.rs | 36 ---------------------------- 2 files changed, 10 insertions(+), 37 deletions(-) diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index d88edbc9d..bcc00ed04 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -31,7 +31,16 @@ pub async fn connect_socket(idx: usize, config: &Config) -> Result return Ok(Socket::new_tcp(socket)), + Ok(socket) => { + socket.set_nodelay(true).map_err(Error::connect)?; + if config.keepalives { + socket + .set_keepalive(Some(config.keepalives_idle)) + .map_err(Error::connect)?; + } + + return Ok(Socket::new_tcp(socket)); + } Err(e) => e, }; error = Some(new_error); diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 411929e0f..5d27a43d5 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -141,42 +141,6 @@ async fn insert_select() { } /* -#[test] -fn insert_select() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = 
runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query("CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT)") - .for_each(|_| Ok(())), - ) - .unwrap(); - - let insert = client.prepare("INSERT INTO foo (name) VALUES ($1), ($2)"); - let select = client.prepare("SELECT id, name FROM foo ORDER BY id"); - let prepare = insert.join(select); - let (insert, select) = runtime.block_on(prepare).unwrap(); - - let insert = client - .execute(&insert, &[&"alice", &"bob"]) - .map(|n| assert_eq!(n, 2)); - let select = client.query(&select, &[]).collect().map(|rows| { - assert_eq!(rows.len(), 2); - assert_eq!(rows[0].get::<_, i32>(0), 1); - assert_eq!(rows[0].get::<_, &str>(1), "alice"); - assert_eq!(rows[1].get::<_, i32>(0), 2); - assert_eq!(rows[1].get::<_, &str>(1), "bob"); - }); - let tests = insert.join(select); - runtime.block_on(tests).unwrap(); -} - #[test] fn query_portal() { let _ = env_logger::try_init(); From 2c43519093aab98c2e1e6e180dfd7490b546727c Mon Sep 17 00:00:00 2001 From: Kai Yao Date: Sat, 27 Jul 2019 21:07:03 -0500 Subject: [PATCH 195/819] Updated bitvec to 0.6.1, as it supports serde serialization/deserialization. --- postgres/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 4 ++-- tokio-postgres/tests/test/runtime.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 5200f788b..ac1c120e9 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -20,7 +20,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } default = ["runtime"] runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] -"with-bit-vec-0_5" = ["tokio-postgres/with-bit-vec-0_5"] +"with-bit-vec-0_6" = ["tokio-postgres/with-bit-vec-0_6"] "with-chrono-0_4" = ["tokio-postgres/with-chrono-0_4"] "with-eui48-0_4" = ["tokio-postgres/with-eui48-0_4"] "with-geo-types-0_4" = ["tokio-postgres/with-geo-types-0_4"] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 8d2d6d00a..673279ea5 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -23,7 +23,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } default = ["runtime"] runtime = ["tokio-tcp", "tokio-timer", "tokio-uds", "futures-cpupool", "lazy_static"] -"with-bit-vec-0_5" = ["bit-vec-05"] +"with-bit-vec-0_6" = ["bit-vec-06"] "with-chrono-0_4" = ["chrono-04"] "with-eui48-0_4" = ["eui48-04"] "with-geo-types-0_4" = ["geo-types-04"] @@ -48,7 +48,7 @@ futures-cpupool = { version = "0.1", optional = true } lazy_static = { version = "1.0", optional = true } tokio-timer = { version = "0.2", optional = true } -bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } +bit-vec-06 = { version = "0.6.1", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } geo-types-04 = { version = "0.4", package = "geo-types", optional = true } diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 2af9a18d7..f959a16ef 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -57,7 +57,7 @@ fn target_session_attrs_ok() { "host=localhost port=5433 user=postgres target_session_attrs=read-write", NoTls, ); - runtime.block_on(f).unwrap(); + let _ = runtime.block_on(f).unwrap(); } #[test] From 
51f02c89eebe9d7d9c2915330eeb9fdb9f0e1853 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 26 Jul 2019 20:11:34 -0700 Subject: [PATCH 196/819] Fully working prepare --- tokio-postgres/src/client.rs | 95 +-- tokio-postgres/src/prepare.rs | 251 ++++++- tokio-postgres/src/query.rs | 55 +- tokio-postgres/src/types/mod.rs | 16 +- tokio-postgres/src/types/serde_json_1.rs | 2 +- tokio-postgres/tests/test/main.rs | 297 ++++---- tokio-postgres/tests/test/runtime.rs | 10 +- tokio-postgres/tests/test/types/bit_vec_07.rs | 14 +- tokio-postgres/tests/test/types/chrono_04.rs | 62 +- tokio-postgres/tests/test/types/eui48_04.rs | 7 +- tokio-postgres/tests/test/types/geo_010.rs | 27 +- tokio-postgres/tests/test/types/mod.rs | 646 +++++++++--------- .../tests/test/types/serde_json_1.rs | 14 +- tokio-postgres/tests/test/types/uuid_07.rs | 7 +- 14 files changed, 826 insertions(+), 677 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 0f731ae48..e49c68da3 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,7 +1,7 @@ use crate::codec::BackendMessages; use crate::connection::{Request, RequestMessages}; use crate::prepare::prepare; -use crate::query::{execute, query, Query}; +use crate::query::{self, Query}; use crate::types::{Oid, ToSql, Type}; use crate::{Error, Statement}; use fallible_iterator::FallibleIterator; @@ -42,9 +42,9 @@ impl Responses { } struct State { - has_typeinfo: bool, - has_typeinfo_composite: bool, - has_typeinfo_enum: bool, + typeinfo: Option, + typeinfo_composite: Option, + typeinfo_enum: Option, types: HashMap, } @@ -67,36 +67,36 @@ impl InnerClient { }) } - pub fn has_typeinfo(&self) -> bool { - self.state.lock().has_typeinfo + pub fn typeinfo(&self) -> Option { + self.state.lock().typeinfo.clone() } - pub fn set_has_typeinfo(&self) { - self.state.lock().has_typeinfo = true; + pub fn set_typeinfo(&self, statement: &Statement) { + self.state.lock().typeinfo = Some(statement.clone()); } - pub fn has_typeinfo_composite(&self) -> bool { - self.state.lock().has_typeinfo_composite + pub fn typeinfo_composite(&self) -> Option { + self.state.lock().typeinfo_composite.clone() } - pub fn set_has_typeinfo_composite(&self) { - self.state.lock().has_typeinfo_composite = true; + pub fn set_typeinfo_composite(&self, statement: &Statement) { + self.state.lock().typeinfo_composite = Some(statement.clone()); } - pub fn has_typeinfo_enum(&self) -> bool { - self.state.lock().has_typeinfo_enum + pub fn typeinfo_enum(&self) -> Option { + self.state.lock().typeinfo_enum.clone() } - pub fn set_has_typeinfo_enum(&self) { - self.state.lock().has_typeinfo_enum = true; + pub fn set_typeinfo_enum(&self, statement: &Statement) { + self.state.lock().typeinfo_enum = Some(statement.clone()); } pub fn type_(&self, oid: Oid) -> Option { self.state.lock().types.get(&oid).cloned() } - pub fn set_type(&self, oid: Oid, type_: Type) { - self.state.lock().types.insert(oid, type_); + pub fn set_type(&self, oid: Oid, type_: &Type) { + self.state.lock().types.insert(oid, type_.clone()); } } @@ -116,9 +116,9 @@ impl Client { inner: Arc::new(InnerClient { sender, state: Mutex::new(State { - has_typeinfo: false, - has_typeinfo_composite: false, - has_typeinfo_enum: false, + typeinfo: None, + typeinfo_composite: None, + typeinfo_enum: None, types: HashMap::new(), }), }), @@ -131,58 +131,59 @@ impl Client { self.inner.clone() } - pub fn prepare<'a>( - &mut self, - query: &'a str, - ) -> impl Future> + 'a { + pub fn prepare(&mut self, query: &str) -> impl 
Future> { self.prepare_typed(query, &[]) } - pub fn prepare_typed<'a>( + pub fn prepare_typed( &mut self, - query: &'a str, - parameter_types: &'a [Type], - ) -> impl Future> + 'a { + query: &str, + parameter_types: &[Type], + ) -> impl Future> { prepare(self.inner(), query, parameter_types) } - pub fn query<'a>( + pub fn query( &mut self, - statement: &'a Statement, - params: &'a [&dyn ToSql], - ) -> impl Future> + 'a { - self.query_iter(statement, params.iter().cloned()) + statement: &Statement, + params: &[&dyn ToSql], + ) -> impl Future> { + let buf = query::encode(statement, params.iter().cloned()); + query::query(self.inner(), statement.clone(), buf) } pub fn query_iter<'a, I>( &mut self, - statement: &'a Statement, + statement: &Statement, params: I, - ) -> impl Future> + 'a + ) -> impl Future> where - I: IntoIterator + 'a, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { - query(self.inner(), statement, params) + let buf = query::encode(statement, params); + query::query(self.inner(), statement.clone(), buf) } - pub fn execute<'a>( + pub fn execute( &mut self, - statement: &'a Statement, - params: &'a [&dyn ToSql], - ) -> impl Future> + 'a { - self.execute_iter(statement, params.iter().cloned()) + statement: &Statement, + params: &[&dyn ToSql], + ) -> impl Future> { + let buf = query::encode(statement, params.iter().cloned()); + query::execute(self.inner(), buf) } pub fn execute_iter<'a, I>( &mut self, - statement: &'a Statement, + statement: &Statement, params: I, - ) -> impl Future> + 'a + ) -> impl Future> where - I: IntoIterator + 'a, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { - execute(self.inner(), statement, params) + let buf = query::encode(statement, params); + query::execute(self.inner(), buf) } } diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 056ec3dcb..8cc880915 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -1,74 +1,257 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::types::{Oid, Type}; +use crate::error::SqlState; +use crate::query; +use crate::types::{Field, Kind, Oid, ToSql, Type}; use crate::{Column, Error, Statement}; use fallible_iterator::FallibleIterator; +use futures::{future, StreamExt, TryStreamExt}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; +use std::future::Future; +use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +const TYPEINFO_STMT: &str = "typeinfo"; + +const TYPEINFO_QUERY: &str = "\ +SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid +FROM pg_catalog.pg_type t +LEFT OUTER JOIN pg_catalog.pg_range r ON r.rngtypid = t.oid +INNER JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid +WHERE t.oid = $1 +"; + +// Range types weren't added until Postgres 9.2, so pg_range may not exist +const TYPEINFO_FALLBACK_QUERY: &str = "\ +SELECT t.typname, t.typtype, t.typelem, NULL::OID, t.typbasetype, n.nspname, t.typrelid +FROM pg_catalog.pg_type t +INNER JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid +WHERE t.oid = $1 +"; + +const TYPEINFO_ENUM_QUERY: &str = "\ +SELECT enumlabel +FROM pg_catalog.pg_enum +WHERE enumtypid = $1 +ORDER BY enumsortorder +"; + +// Postgres 9.0 didn't have enumsortorder +const TYPEINFO_ENUM_FALLBACK_QUERY: &str = "\ +SELECT enumlabel +FROM pg_catalog.pg_enum +WHERE enumtypid = $1 +ORDER BY oid +"; + +const TYPEINFO_COMPOSITE_QUERY: &str = "\ 
+SELECT attname, atttypid +FROM pg_catalog.pg_attribute +WHERE attrelid = $1 +AND NOT attisdropped +AND attnum > 0 +ORDER BY attnum +"; + static NEXT_ID: AtomicUsize = AtomicUsize::new(0); -pub async fn prepare( +pub fn prepare( client: Arc, query: &str, types: &[Type], -) -> Result { +) -> impl Future> { let name = format!("s{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); + let buf = encode(&name, query, types); + + async move { + let buf = buf?; + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + match responses.next().await? { + Message::ParseComplete => {} + _ => return Err(Error::unexpected_message()), + } + + let parameter_description = match responses.next().await? { + Message::ParameterDescription(body) => body, + _ => return Err(Error::unexpected_message()), + }; + + let row_description = match responses.next().await? { + Message::RowDescription(body) => Some(body), + Message::NoData => None, + _ => return Err(Error::unexpected_message()), + }; + + let mut parameters = vec![]; + let mut it = parameter_description.parameters(); + while let Some(oid) = it.next().map_err(Error::parse)? { + let type_ = get_type(&client, oid).await?; + parameters.push(type_); + } + + let mut columns = vec![]; + if let Some(row_description) = row_description { + let mut it = row_description.fields(); + while let Some(field) = it.next().map_err(Error::parse)? { + let type_ = get_type(&client, field.type_oid()).await?; + let column = Column::new(field.name().to_string(), type_); + columns.push(column); + } + } + + Ok(Statement::new(&client, name, parameters, columns)) + } +} + +fn encode(name: &str, query: &str, types: &[Type]) -> Result, Error> { let mut buf = vec![]; - frontend::parse(&name, query, types.iter().map(Type::oid), &mut buf).map_err(Error::encode)?; + frontend::parse(name, query, types.iter().map(Type::oid), &mut buf).map_err(Error::encode)?; frontend::describe(b'S', &name, &mut buf).map_err(Error::encode)?; frontend::sync(&mut buf); - let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + Ok(buf) +} - match responses.next().await? { - Message::ParseComplete => {} - _ => return Err(Error::unexpected_message()), +async fn get_type(client: &Arc, oid: Oid) -> Result { + if let Some(type_) = Type::from_oid(oid) { + return Ok(type_); + } + + if let Some(type_) = client.type_(oid) { + return Ok(type_); } - let parameter_description = match responses.next().await? { - Message::ParameterDescription(body) => body, - _ => return Err(Error::unexpected_message()), + let stmt = typeinfo_statement(client).await?; + + let params: &[&dyn ToSql] = &[&oid]; + let buf = query::encode(&stmt, params.iter().cloned()); + let mut rows = query::query(client.clone(), stmt, buf).await?; + + let row = match rows.try_next().await? { + Some(row) => row, + None => return Err(Error::unexpected_message()), }; - let row_description = match responses.next().await? 
{ - Message::RowDescription(body) => Some(body), - Message::NoData => None, - _ => return Err(Error::unexpected_message()), + let name: String = row.try_get(0)?; + let type_: i8 = row.try_get(1)?; + let elem_oid: Oid = row.try_get(2)?; + let rngsubtype: Option = row.try_get(3)?; + let basetype: Oid = row.try_get(4)?; + let schema: String = row.try_get(5)?; + let relid: Oid = row.try_get(6)?; + + let kind = if type_ == b'e' as i8 { + let variants = get_enum_variants(client, oid).await?; + Kind::Enum(variants) + } else if type_ == b'p' as i8 { + Kind::Pseudo + } else if basetype != 0 { + let type_ = get_type_rec(client, basetype).await?; + Kind::Domain(type_) + } else if elem_oid != 0 { + let type_ = get_type_rec(client, elem_oid).await?; + Kind::Array(type_) + } else if relid != 0 { + let fields = get_composite_fields(client, relid).await?; + Kind::Composite(fields) + } else if let Some(rngsubtype) = rngsubtype { + let type_ = get_type_rec(client, rngsubtype).await?; + Kind::Range(type_) + } else { + Kind::Simple }; - let mut parameters = vec![]; - let mut it = parameter_description.parameters(); - while let Some(oid) = it.next().map_err(Error::parse)? { - let type_ = get_type(&client, oid).await?; - parameters.push(type_); + let type_ = Type::new(name, oid, kind, schema); + client.set_type(oid, &type_); + + Ok(type_) +} + +fn get_type_rec<'a>( + client: &'a Arc, + oid: Oid, +) -> Pin> + 'a>> { + Box::pin(get_type(client, oid)) +} + +async fn typeinfo_statement(client: &Arc) -> Result { + if let Some(stmt) = client.typeinfo() { + return Ok(stmt); } - let mut columns = vec![]; - if let Some(row_description) = row_description { - let mut it = row_description.fields(); - while let Some(field) = it.next().map_err(Error::parse)? { - let type_ = get_type(&client, field.type_oid()).await?; - let column = Column::new(field.name().to_string(), type_); - columns.push(column); + let stmt = match Box::pin(prepare(client.clone(), TYPEINFO_QUERY, &[])).await { + Ok(stmt) => stmt, + Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => { + Box::pin(prepare(client.clone(), TYPEINFO_FALLBACK_QUERY, &[])).await? } + Err(e) => return Err(e), + }; + + client.set_typeinfo(&stmt); + Ok(stmt) +} + +async fn get_enum_variants(client: &Arc, oid: Oid) -> Result, Error> { + let stmt = typeinfo_enum_statement(client).await?; + + let params: &[&dyn ToSql] = &[&oid]; + let buf = query::encode(&stmt, params.iter().cloned()); + query::query(client.clone(), stmt, buf) + .await? + .and_then(|row| future::ready(row.try_get(0))) + .try_collect() + .await +} + +async fn typeinfo_enum_statement(client: &Arc) -> Result { + if let Some(stmt) = client.typeinfo_enum() { + return Ok(stmt); } - Ok(Statement::new(&client, name, parameters, columns)) + let stmt = match Box::pin(prepare(client.clone(), TYPEINFO_ENUM_QUERY, &[])).await { + Ok(stmt) => stmt, + Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => { + Box::pin(prepare(client.clone(), TYPEINFO_ENUM_FALLBACK_QUERY, &[])).await? + } + Err(e) => return Err(e), + }; + + client.set_typeinfo_enum(&stmt); + Ok(stmt) } -async fn get_type(client: &Arc, oid: Oid) -> Result { - if let Some(type_) = Type::from_oid(oid) { - return Ok(type_); +async fn get_composite_fields(client: &Arc, oid: Oid) -> Result, Error> { + let stmt = typeinfo_composite_statement(client).await?; + + let params: &[&dyn ToSql] = &[&oid]; + let buf = query::encode(&stmt, params.iter().cloned()); + let rows = query::query(client.clone(), stmt, buf) + .await? 
+ .try_collect::>() + .await?; + + let mut fields = vec![]; + for row in rows { + let name = row.try_get(0)?; + let oid = row.try_get(1)?; + let type_ = Box::pin(get_type(client, oid)).await?; + fields.push(Field::new(name, type_)); } - if let Some(type_) = client.type_(oid) { - return Ok(type_); + Ok(fields) +} + +async fn typeinfo_composite_statement(client: &Arc) -> Result { + if let Some(stmt) = client.typeinfo_composite() { + return Ok(stmt); } - unimplemented!() + let stmt = Box::pin(prepare(client.clone(), TYPEINFO_COMPOSITE_QUERY, &[])).await?; + + client.set_typeinfo_composite(&stmt); + Ok(stmt) } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 4da69197a..e9702f91a 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -11,33 +11,21 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -pub async fn query<'a, I>( +pub async fn query( client: Arc, - statement: &Statement, - params: I, -) -> Result -where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, -{ - let responses = start(&client, &statement, params).await?; + statement: Statement, + buf: Result, Error>, +) -> Result { + let responses = start(client, buf).await?; Ok(Query { - statement: statement.clone(), + statement, responses, }) } -pub async fn execute<'a, I>( - client: Arc, - statement: &Statement, - params: I, -) -> Result -where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, -{ - let mut responses = start(&client, &statement, params).await?; +pub async fn execute(client: Arc, buf: Result, Error>) -> Result { + let mut responses = start(client, buf).await?; loop { match responses.next().await? { @@ -59,11 +47,19 @@ where } } -async fn start<'a, I>( - client: &Arc, - statement: &Statement, - params: I, -) -> Result +async fn start(client: Arc, buf: Result, Error>) -> Result { + let buf = buf?; + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + match responses.next().await? { + Message::BindComplete => {} + _ => return Err(Error::unexpected_message()), + } + + Ok(responses) +} + +pub fn encode<'a, I>(statement: &Statement, params: I) -> Result, Error> where I: IntoIterator, I::IntoIter: ExactSizeIterator, @@ -105,14 +101,7 @@ where frontend::execute("", 0, &mut buf).map_err(Error::encode)?; frontend::sync(&mut buf); - let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - - match responses.next().await? { - Message::BindComplete => {} - _ => return Err(Error::unexpected_message()), - } - - Ok(responses) + Ok(buf) } pub struct Query { diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index 9cf5a1cd9..baa98a8a2 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -107,7 +107,7 @@ impl fmt::Display for Type { } impl Type { - pub(crate) fn _new(name: String, oid: Oid, kind: Kind, schema: String) -> Type { + pub(crate) fn new(name: String, oid: Oid, kind: Kind, schema: String) -> Type { Type(Inner::Other(Arc::new(Other { name, oid, @@ -175,6 +175,10 @@ pub struct Field { } impl Field { + pub(crate) fn new(name: String, type_: Type) -> Field { + Field { name, type_ } + } + /// Returns the name of the field. 
pub fn name(&self) -> &str { &self.name @@ -186,12 +190,6 @@ impl Field { } } -impl Field { - pub(crate) fn new(name: String, type_: Type) -> Field { - Field { name, type_ } - } -} - /// An error indicating that a `NULL` Postgres value was passed to a `FromSql` /// implementation that does not support `NULL` values. #[derive(Debug, Clone, Copy)] @@ -543,7 +541,7 @@ pub enum IsNull { /// /// `ToSql` is implemented for `Vec` and `&[T]` where `T` implements `ToSql`, /// and corresponds to one-dimensional Postgres arrays with an index offset of 1. -pub trait ToSql: fmt::Debug + Sync + Send { +pub trait ToSql: fmt::Debug { /// Converts the value of `self` into the binary format of the specified /// Postgres `Type`, appending it to `out`. /// @@ -744,7 +742,7 @@ simple_to!(f64, float8_to_sql, FLOAT8); impl ToSql for HashMap, H> where - H: BuildHasher + Sync + Send, + H: BuildHasher, { fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { types::hstore_to_sql( diff --git a/tokio-postgres/src/types/serde_json_1.rs b/tokio-postgres/src/types/serde_json_1.rs index 5ee098be9..b3b2e3c90 100644 --- a/tokio-postgres/src/types/serde_json_1.rs +++ b/tokio-postgres/src/types/serde_json_1.rs @@ -33,7 +33,7 @@ where impl ToSql for Json where - T: Serialize + Debug + Sync + Send, + T: Serialize + Debug, { fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { if *ty == Type::JSONB { diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 5d27a43d5..7bc398764 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,19 +1,17 @@ #![warn(rust_2018_idioms)] #![feature(async_await)] -use futures::{try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{try_join, FutureExt, TryFutureExt, TryStreamExt}; use tokio::net::TcpStream; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; -use tokio_postgres::types::Type; +use tokio_postgres::types::{Kind, Type}; use tokio_postgres::{Client, Config, Connection, Error}; mod parse; #[cfg(feature = "runtime")] mod runtime; -/* mod types; -*/ async fn connect_raw(s: &str) -> Result<(Client, Connection), Error> { let socket = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) @@ -100,13 +98,13 @@ async fn scram_password_ok() { async fn pipelined_prepare() { let mut client = connect("user=postgres").await; - let prepare1 = client.prepare("SELECT $1::TEXT"); + let prepare1 = client.prepare("SELECT $1::HSTORE[]"); let prepare2 = client.prepare("SELECT $1::BIGINT"); let (statement1, statement2) = try_join!(prepare1, prepare2).unwrap(); - assert_eq!(statement1.params()[0], Type::TEXT); - assert_eq!(statement1.columns()[0].type_(), &Type::TEXT); + assert_eq!(statement1.params()[0].name(), "_hstore"); + assert_eq!(statement1.columns()[0].type_().name(), "_hstore"); assert_eq!(statement2.params()[0], Type::INT8); assert_eq!(statement2.columns()[0].type_(), &Type::INT8); @@ -140,110 +138,23 @@ async fn insert_select() { assert_eq!(rows[1].get::<_, &str>(1), "bob"); } -/* -#[test] -fn query_portal() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT); - INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('charlie'); - BEGIN;", - ) - .for_each(|_| 
Ok(())), - ) - .unwrap(); - - let statement = runtime - .block_on(client.prepare("SELECT id, name FROM foo ORDER BY id")) - .unwrap(); - let portal = runtime.block_on(client.bind(&statement, &[])).unwrap(); - - let f1 = client.query_portal(&portal, 2).collect(); - let f2 = client.query_portal(&portal, 2).collect(); - let f3 = client.query_portal(&portal, 2).collect(); - let (r1, r2, r3) = runtime.block_on(f1.join3(f2, f3)).unwrap(); - - assert_eq!(r1.len(), 2); - assert_eq!(r1[0].get::<_, i32>(0), 1); - assert_eq!(r1[0].get::<_, &str>(1), "alice"); - assert_eq!(r1[1].get::<_, i32>(0), 2); - assert_eq!(r1[1].get::<_, &str>(1), "bob"); - - assert_eq!(r2.len(), 1); - assert_eq!(r2[0].get::<_, i32>(0), 3); - assert_eq!(r2[0].get::<_, &str>(1), "charlie"); - - assert_eq!(r3.len(), 0); -} - -#[test] -fn cancel_query_raw() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - let sleep = client - .simple_query("SELECT pg_sleep(100)") - .for_each(|_| Ok(())) - .then(|r| match r { - Ok(_) => panic!("unexpected success"), - Err(ref e) if e.code() == Some(&SqlState::QUERY_CANCELED) => Ok::<(), ()>(()), - Err(e) => panic!("unexpected error {}", e), - }); - let cancel = Delay::new(Instant::now() + Duration::from_millis(100)) - .then(|r| { - r.unwrap(); - TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - }) - .then(|r| { - let s = r.unwrap(); - client.cancel_query_raw(s, NoTls) - }) - .then(|r| { - r.unwrap(); - Ok::<(), ()>(()) - }); - - let ((), ()) = runtime.block_on(sleep.join(cancel)).unwrap(); -} - -#[test] -fn custom_enum() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); +#[tokio::test] +async fn custom_enum() { + let mut client = connect("user=postgres").await; - runtime - .block_on( - client - .simple_query( - "CREATE TYPE pg_temp.mood AS ENUM ( - 'sad', - 'ok', - 'happy' - )", - ) - .for_each(|_| Ok(())), + let create = client + .prepare( + "CREATE TYPE pg_temp.mood AS ENUM ( + 'sad', + 'ok', + 'happy' + )", ) + .await .unwrap(); + client.execute(&create, &[]).await.unwrap(); - let select = client.prepare("SELECT $1::mood"); - let select = runtime.block_on(select).unwrap(); + let select = client.prepare("SELECT $1::mood").await.unwrap(); let ty = &select.params()[0]; assert_eq!("mood", ty.name()); @@ -253,53 +164,37 @@ fn custom_enum() { "ok".to_string(), "happy".to_string(), ]), - ty.kind() + ty.kind(), ); } -#[test] -fn custom_domain() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); +#[tokio::test] +async fn custom_domain() { + let mut client = connect("user=postgres").await; - runtime - .block_on( - client - .simple_query( - "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)", - ) - .for_each(|_| Ok(())), - ) + let create = client + .prepare("CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)") + .await .unwrap(); + client.execute(&create, &[]).await.unwrap(); - let 
select = client.prepare("SELECT $1::session_id"); - let select = runtime.block_on(select).unwrap(); + let select = client.prepare("SELECT $1::session_id").await.unwrap(); let ty = &select.params()[0]; assert_eq!("session_id", ty.name()); assert_eq!(&Kind::Domain(Type::BYTEA), ty.kind()); } -#[test] -fn custom_array() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); +#[tokio::test] +async fn custom_array() { + let mut client = connect("user=postgres").await; - let select = client.prepare("SELECT $1::HSTORE[]"); - let select = runtime.block_on(select).unwrap(); + let select = client.prepare("SELECT $1::HSTORE[]").await.unwrap(); let ty = &select.params()[0]; assert_eq!("_hstore", ty.name()); - match *ty.kind() { - Kind::Array(ref ty) => { + match ty.kind() { + Kind::Array(ty) => { assert_eq!("hstore", ty.name()); assert_eq!(&Kind::Simple, ty.kind()); } @@ -307,36 +202,28 @@ fn custom_array() { } } -#[test] -fn custom_composite() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); +#[tokio::test] +async fn custom_composite() { + let mut client = connect("user=postgres").await; - runtime - .block_on( - client - .simple_query( - "CREATE TYPE pg_temp.inventory_item AS ( - name TEXT, - supplier INTEGER, - price NUMERIC - )", - ) - .for_each(|_| Ok(())), + let create = client + .prepare( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier INTEGER, + price NUMERIC + )", ) + .await .unwrap(); + client.execute(&create, &[]).await.unwrap(); - let select = client.prepare("SELECT $1::inventory_item"); - let select = runtime.block_on(select).unwrap(); + let select = client.prepare("SELECT $1::inventory_item").await.unwrap(); let ty = &select.params()[0]; assert_eq!(ty.name(), "inventory_item"); - match *ty.kind() { - Kind::Composite(ref fields) => { + match ty.kind() { + Kind::Composite(fields) => { assert_eq!(fields[0].name(), "name"); assert_eq!(fields[0].type_(), &Type::TEXT); assert_eq!(fields[1].name(), "supplier"); @@ -344,12 +231,35 @@ fn custom_composite() { assert_eq!(fields[2].name(), "price"); assert_eq!(fields[2].type_(), &Type::NUMERIC); } - ref t => panic!("bad type {:?}", t), + _ => panic!("unexpected kind"), } } +#[tokio::test] +async fn custom_range() { + let mut client = connect("user=postgres").await; + + let create = client + .prepare( + "CREATE TYPE pg_temp.floatrange AS RANGE ( + subtype = float8, + subtype_diff = float8mi + )", + ) + .await + .unwrap(); + client.execute(&create, &[]).await.unwrap(); + + let select = client.prepare("SELECT $1::floatrange").await.unwrap(); + + let ty = &select.params()[0]; + assert_eq!("floatrange", ty.name()); + assert_eq!(&Kind::Range(Type::FLOAT8), ty.kind()); +} + +/* #[test] -fn custom_range() { +fn query_portal() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); @@ -361,25 +271,39 @@ fn custom_range() { .block_on( client .simple_query( - "CREATE TYPE pg_temp.floatrange AS RANGE ( - subtype = float8, - subtype_diff = float8mi - )", + "CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT); + INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('charlie'); + BEGIN;", ) 
.for_each(|_| Ok(())), ) .unwrap(); - let select = client.prepare("SELECT $1::floatrange"); - let select = runtime.block_on(select).unwrap(); + let statement = runtime + .block_on(client.prepare("SELECT id, name FROM foo ORDER BY id")) + .unwrap(); + let portal = runtime.block_on(client.bind(&statement, &[])).unwrap(); - let ty = &select.params()[0]; - assert_eq!("floatrange", ty.name()); - assert_eq!(&Kind::Range(Type::FLOAT8), ty.kind()); + let f1 = client.query_portal(&portal, 2).collect(); + let f2 = client.query_portal(&portal, 2).collect(); + let f3 = client.query_portal(&portal, 2).collect(); + let (r1, r2, r3) = runtime.block_on(f1.join3(f2, f3)).unwrap(); + + assert_eq!(r1.len(), 2); + assert_eq!(r1[0].get::<_, i32>(0), 1); + assert_eq!(r1[0].get::<_, &str>(1), "alice"); + assert_eq!(r1[1].get::<_, i32>(0), 2); + assert_eq!(r1[1].get::<_, &str>(1), "bob"); + + assert_eq!(r2.len(), 1); + assert_eq!(r2[0].get::<_, i32>(0), 3); + assert_eq!(r2[0].get::<_, &str>(1), "charlie"); + + assert_eq!(r3.len(), 0); } #[test] -fn custom_simple() { +fn cancel_query_raw() { let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); @@ -387,12 +311,29 @@ fn custom_simple() { let connection = connection.map_err(|e| panic!("{}", e)); runtime.handle().spawn(connection).unwrap(); - let select = client.prepare("SELECT $1::HSTORE"); - let select = runtime.block_on(select).unwrap(); + let sleep = client + .simple_query("SELECT pg_sleep(100)") + .for_each(|_| Ok(())) + .then(|r| match r { + Ok(_) => panic!("unexpected success"), + Err(ref e) if e.code() == Some(&SqlState::QUERY_CANCELED) => Ok::<(), ()>(()), + Err(e) => panic!("unexpected error {}", e), + }); + let cancel = Delay::new(Instant::now() + Duration::from_millis(100)) + .then(|r| { + r.unwrap(); + TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + }) + .then(|r| { + let s = r.unwrap(); + client.cancel_query_raw(s, NoTls) + }) + .then(|r| { + r.unwrap(); + Ok::<(), ()>(()) + }); - let ty = &select.params()[0]; - assert_eq!("hstore", ty.name()); - assert_eq!(&Kind::Simple, ty.kind()); + let ((), ()) = runtime.block_on(sleep.join(cancel)).unwrap(); } #[test] diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index b9939e51e..b66f81fc7 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,6 +1,5 @@ -use futures::{Future, FutureExt, Stream}; +use futures::{FutureExt, TryStreamExt}; use std::time::{Duration, Instant}; -use tokio::runtime::current_thread::Runtime; use tokio::timer::Delay; use tokio_postgres::error::SqlState; use tokio_postgres::NoTls; @@ -10,10 +9,9 @@ async fn smoke_test(s: &str) { let connection = connection.map(|e| e.unwrap()); tokio::spawn(connection); - /* - let execute = client.simple_query("SELECT 1").for_each(|_| Ok(())); - runtime.block_on(execute).unwrap(); - */ + let stmt = client.prepare("SELECT $1::INT").await.unwrap(); + let rows = client.query(&stmt, &[&1i32]).await.unwrap().try_collect::>().await.unwrap(); + assert_eq!(rows[0].get::<_, i32>(0), 1i32); } #[tokio::test] diff --git a/tokio-postgres/tests/test/types/bit_vec_07.rs b/tokio-postgres/tests/test/types/bit_vec_07.rs index 3ab29ee72..4c0e426fb 100644 --- a/tokio-postgres/tests/test/types/bit_vec_07.rs +++ b/tokio-postgres/tests/test/types/bit_vec_07.rs @@ -2,28 +2,30 @@ use bit_vec_07::BitVec; use crate::types::test_type; -#[test] -fn test_bit_params() { +#[tokio::test] +async fn test_bit_params() { let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); 
bv.pop(); bv.pop(); test_type( "BIT(14)", - &[(Some(bv), "B'01101001000001'"), (None, "NULL")], + vec![(Some(bv), "B'01101001000001'"), (None, "NULL")], ) + .await } -#[test] -fn test_varbit_params() { +#[tokio::test] +async fn test_varbit_params() { let mut bv = BitVec::from_bytes(&[0b0110_1001, 0b0000_0111]); bv.pop(); bv.pop(); test_type( "VARBIT", - &[ + vec![ (Some(bv), "B'01101001000001'"), (Some(BitVec::from_bytes(&[])), "B''"), (None, "NULL"), ], ) + .await } diff --git a/tokio-postgres/tests/test/types/chrono_04.rs b/tokio-postgres/tests/test/types/chrono_04.rs index eb0018065..e36aeafa2 100644 --- a/tokio-postgres/tests/test/types/chrono_04.rs +++ b/tokio-postgres/tests/test/types/chrono_04.rs @@ -3,8 +3,8 @@ use tokio_postgres::types::{Date, Timestamp}; use crate::types::test_type; -#[test] -fn test_naive_date_time_params() { +#[tokio::test] +async fn test_naive_date_time_params() { fn make_check(time: &str) -> (Option, &str) { ( Some(NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), @@ -13,17 +13,17 @@ fn test_naive_date_time_params() { } test_type( "TIMESTAMP", - &[ + vec![ make_check("'1970-01-01 00:00:00.010000000'"), make_check("'1965-09-25 11:19:33.100314000'"), make_check("'2010-02-09 23:11:45.120200000'"), (None, "NULL"), ], - ); + ).await; } -#[test] -fn test_with_special_naive_date_time_params() { +#[tokio::test] +async fn test_with_special_naive_date_time_params() { fn make_check(time: &str) -> (Timestamp, &str) { ( Timestamp::Value( @@ -34,18 +34,19 @@ fn test_with_special_naive_date_time_params() { } test_type( "TIMESTAMP", - &[ + vec![ make_check("'1970-01-01 00:00:00.010000000'"), make_check("'1965-09-25 11:19:33.100314000'"), make_check("'2010-02-09 23:11:45.120200000'"), (Timestamp::PosInfinity, "'infinity'"), (Timestamp::NegInfinity, "'-infinity'"), ], - ); + ) + .await; } -#[test] -fn test_date_time_params() { +#[tokio::test] +async fn test_date_time_params() { fn make_check(time: &str) -> (Option>, &str) { ( Some( @@ -57,17 +58,18 @@ fn test_date_time_params() { } test_type( "TIMESTAMP WITH TIME ZONE", - &[ + vec![ make_check("'1970-01-01 00:00:00.010000000'"), make_check("'1965-09-25 11:19:33.100314000'"), make_check("'2010-02-09 23:11:45.120200000'"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_with_special_date_time_params() { +#[tokio::test] +async fn test_with_special_date_time_params() { fn make_check(time: &str) -> (Timestamp>, &str) { ( Timestamp::Value( @@ -79,18 +81,19 @@ fn test_with_special_date_time_params() { } test_type( "TIMESTAMP WITH TIME ZONE", - &[ + vec![ make_check("'1970-01-01 00:00:00.010000000'"), make_check("'1965-09-25 11:19:33.100314000'"), make_check("'2010-02-09 23:11:45.120200000'"), (Timestamp::PosInfinity, "'infinity'"), (Timestamp::NegInfinity, "'-infinity'"), ], - ); + ) + .await; } -#[test] -fn test_date_params() { +#[tokio::test] +async fn test_date_params() { fn make_check(time: &str) -> (Option, &str) { ( Some(NaiveDate::parse_from_str(time, "'%Y-%m-%d'").unwrap()), @@ -99,17 +102,18 @@ fn test_date_params() { } test_type( "DATE", - &[ + vec![ make_check("'1970-01-01'"), make_check("'1965-09-25'"), make_check("'2010-02-09'"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_with_special_date_params() { +#[tokio::test] +async fn test_with_special_date_params() { fn make_check(date: &str) -> (Date, &str) { ( Date::Value(NaiveDate::parse_from_str(date, "'%Y-%m-%d'").unwrap()), @@ -118,18 +122,19 @@ fn test_with_special_date_params() { } test_type( "DATE", - &[ + vec![ 
make_check("'1970-01-01'"), make_check("'1965-09-25'"), make_check("'2010-02-09'"), (Date::PosInfinity, "'infinity'"), (Date::NegInfinity, "'-infinity'"), ], - ); + ) + .await; } -#[test] -fn test_time_params() { +#[tokio::test] +async fn test_time_params() { fn make_check(time: &str) -> (Option, &str) { ( Some(NaiveTime::parse_from_str(time, "'%H:%M:%S.%f'").unwrap()), @@ -138,11 +143,12 @@ fn test_time_params() { } test_type( "TIME", - &[ + vec![ make_check("'00:00:00.010000000'"), make_check("'11:19:33.100314000'"), make_check("'23:11:45.120200000'"), (None, "NULL"), ], - ); + ) + .await; } diff --git a/tokio-postgres/tests/test/types/eui48_04.rs b/tokio-postgres/tests/test/types/eui48_04.rs index a881e24f0..0cfb7cb58 100644 --- a/tokio-postgres/tests/test/types/eui48_04.rs +++ b/tokio-postgres/tests/test/types/eui48_04.rs @@ -2,11 +2,11 @@ use eui48_04::MacAddress; use crate::types::test_type; -#[test] -fn test_eui48_params() { +#[tokio::test] +async fn test_eui48_params() { test_type( "MACADDR", - &[ + vec![ ( Some(MacAddress::parse_str("12-34-56-AB-CD-EF").unwrap()), "'12-34-56-ab-cd-ef'", @@ -14,4 +14,5 @@ fn test_eui48_params() { (None, "NULL"), ], ) + .await } diff --git a/tokio-postgres/tests/test/types/geo_010.rs b/tokio-postgres/tests/test/types/geo_010.rs index a782a7362..ecb1cbfcd 100644 --- a/tokio-postgres/tests/test/types/geo_010.rs +++ b/tokio-postgres/tests/test/types/geo_010.rs @@ -2,23 +2,24 @@ use geo_010::{Coordinate, LineString, Point, Rect}; use crate::types::test_type; -#[test] -fn test_point_params() { +#[tokio::test] +async fn test_point_params() { test_type( "POINT", - &[ + vec![ (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), (Some(Point::new(-3.14, 1.618)), "POINT(-3.14, 1.618)"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_box_params() { +#[tokio::test] +async fn test_box_params() { test_type( "BOX", - &[ + vec![ ( Some(Rect { min: Coordinate { x: -3.14, y: 1.618 }, @@ -31,11 +32,12 @@ fn test_box_params() { ), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_path_params() { +#[tokio::test] +async fn test_path_params() { let points = vec![ Coordinate { x: 0., y: 0. 
}, Coordinate { x: -3.14, y: 1.618 }, @@ -46,12 +48,13 @@ fn test_path_params() { ]; test_type( "PATH", - &[ + vec![ ( Some(LineString(points)), "path '((0, 0), (-3.14, 1.618), (160.0, 69701.5615))'", ), (None, "NULL"), ], - ); + ) + .await; } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 18858d568..8332a9595 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -1,4 +1,4 @@ -use futures::{Future, Stream}; +use futures::{TryStreamExt}; use std::collections::HashMap; use std::error::Error; use std::f32; @@ -7,7 +7,6 @@ use std::fmt; use std::net::IpAddr; use std::result; use std::time::{Duration, UNIX_EPOCH}; -use tokio::runtime::current_thread::Runtime; use tokio_postgres::to_sql_checked; use tokio_postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, ToSql, Type, WrongType}; @@ -26,53 +25,63 @@ mod serde_json_1; #[cfg(feature = "with-uuid-0_7")] mod uuid_07; -fn test_type(sql_type: &str, checks: &[(T, S)]) +async fn test_type(sql_type: &str, checks: Vec<(T, S)>) where T: PartialEq + for<'a> FromSqlOwned + ToSql, S: fmt::Display, { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); - - for &(ref val, ref repr) in checks.iter() { - let prepare = client.prepare(&format!("SELECT {}::{}", repr, sql_type)); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[]).collect(); - let rows = runtime.block_on(query).unwrap(); + let mut client = connect("user=postgres").await; + + for (val, repr) in checks { + let stmt = client + .prepare(&format!("SELECT {}::{}", repr, sql_type)) + .await + .unwrap(); + let rows = client + .query(&stmt, &[]) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); let result = rows[0].get(0); - assert_eq!(val, &result); - - let prepare = client.prepare(&format!("SELECT $1::{}", sql_type)); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[val]).collect(); - let rows = runtime.block_on(query).unwrap(); + assert_eq!(val, result); + + let stmt = client + .prepare(&format!("SELECT $1::{}", sql_type)) + .await + .unwrap(); + let rows = client + .query(&stmt, &[&val]) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); let result = rows[0].get(0); - assert_eq!(val, &result); + assert_eq!(val, result); } } -#[test] -fn test_bool_params() { +#[tokio::test] +async fn test_bool_params() { test_type( "BOOL", - &[(Some(true), "'t'"), (Some(false), "'f'"), (None, "NULL")], - ); + vec![(Some(true), "'t'"), (Some(false), "'f'"), (None, "NULL")], + ) + .await; } -#[test] -fn test_i8_params() { - test_type("\"char\"", &[(Some('a' as i8), "'a'"), (None, "NULL")]); +#[tokio::test] +async fn test_i8_params() { + test_type("\"char\"", vec![(Some('a' as i8), "'a'"), (None, "NULL")]).await; } -#[test] -fn test_name_params() { +#[tokio::test] +async fn test_name_params() { test_type( "NAME", - &[ + vec![ (Some("hello world".to_owned()), "'hello world'"), ( Some("イロハニホヘト チリヌルヲ".to_owned()), @@ -80,88 +89,95 @@ fn test_name_params() { ), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_i16_params() { +#[tokio::test] +async fn test_i16_params() { test_type( "SMALLINT", - &[ + vec![ (Some(15001i16), "15001"), (Some(-15001i16), "-15001"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_i32_params() { 
+#[tokio::test] +async fn test_i32_params() { test_type( "INT", - &[ + vec![ (Some(2_147_483_548i32), "2147483548"), (Some(-2_147_483_548i32), "-2147483548"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_oid_params() { +#[tokio::test] +async fn test_oid_params() { test_type( "OID", - &[ + vec![ (Some(2_147_483_548u32), "2147483548"), (Some(4_000_000_000), "4000000000"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_i64_params() { +#[tokio::test] +async fn test_i64_params() { test_type( "BIGINT", - &[ + vec![ (Some(9_223_372_036_854_775_708i64), "9223372036854775708"), (Some(-9_223_372_036_854_775_708i64), "-9223372036854775708"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_f32_params() { +#[tokio::test] +async fn test_f32_params() { test_type( "REAL", - &[ + vec![ (Some(f32::INFINITY), "'infinity'"), (Some(f32::NEG_INFINITY), "'-infinity'"), (Some(1000.55), "1000.55"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_f64_params() { +#[tokio::test] +async fn test_f64_params() { test_type( "DOUBLE PRECISION", - &[ + vec![ (Some(f64::INFINITY), "'infinity'"), (Some(f64::NEG_INFINITY), "'-infinity'"), (Some(10000.55), "10000.55"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_varchar_params() { +#[tokio::test] +async fn test_varchar_params() { test_type( "VARCHAR", - &[ + vec![ (Some("hello world".to_owned()), "'hello world'"), ( Some("イロハニホヘト チリヌルヲ".to_owned()), @@ -169,14 +185,15 @@ fn test_varchar_params() { ), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_text_params() { +#[tokio::test] +async fn test_text_params() { test_type( "TEXT", - &[ + vec![ (Some("hello world".to_owned()), "'hello world'"), ( Some("イロハニホヘト チリヌルヲ".to_owned()), @@ -184,138 +201,149 @@ fn test_text_params() { ), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_borrowed_text() { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); - - let prepare = client.prepare("SELECT 'foo'"); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[]).collect(); - let rows = runtime.block_on(query).unwrap(); +#[tokio::test] +async fn test_borrowed_text() { + let mut client = connect("user=postgres").await; + + let stmt = client.prepare("SELECT 'foo'").await.unwrap(); + let rows = client + .query(&stmt, &[]) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); let s: &str = rows[0].get(0); assert_eq!(s, "foo"); } -#[test] -fn test_bpchar_params() { - let mut runtime = Runtime::new().unwrap(); +#[tokio::test] +async fn test_bpchar_params() { + let mut client = connect("user=postgres").await; - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); - - let batch = client - .simple_query( + let stmt = client + .prepare( "CREATE TEMPORARY TABLE foo ( id SERIAL PRIMARY KEY, b CHAR(5) )", ) - .for_each(|_| Ok(())); - runtime.block_on(batch).unwrap(); - - let prepare = client.prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)"); - let stmt = runtime.block_on(prepare).unwrap(); - let execute = client.execute(&stmt, &[&"12345", &"123", &None::<&'static str>]); - runtime.block_on(execute).unwrap(); - - let prepare = client.prepare("SELECT b FROM foo ORDER BY id"); - let stmt = 
runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[]).collect(); - let res = runtime.block_on(query).unwrap(); + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); + + let stmt = client + .prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)") + .await + .unwrap(); + client + .execute(&stmt, &[&"12345", &"123", &None::<&'static str>]) + .await + .unwrap(); + + let stmt = client + .prepare("SELECT b FROM foo ORDER BY id") + .await + .unwrap(); + let rows = client + .query(&stmt, &[]) + .await + .unwrap() + .map_ok(|row| row.get(0)) + .try_collect::>>() + .await + .unwrap(); assert_eq!( vec![Some("12345".to_owned()), Some("123 ".to_owned()), None], - res.iter().map(|row| row.get(0)).collect::>() + rows, ); } -#[test] -fn test_citext_params() { - let mut runtime = Runtime::new().unwrap(); +#[tokio::test] +async fn test_citext_params() { + let mut client = connect("user=postgres").await; - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); - - let batch = client - .simple_query( + let stmt = client + .prepare( "CREATE TEMPORARY TABLE foo ( id SERIAL PRIMARY KEY, b CITEXT )", ) - .for_each(|_| Ok(())); - runtime.block_on(batch).unwrap(); - - let prepare = client.prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)"); - let stmt = runtime.block_on(prepare).unwrap(); - let execute = client.execute(&stmt, &[&"foobar", &"FooBar", &None::<&'static str>]); - runtime.block_on(execute).unwrap(); - - let prepare = client.prepare("SELECT b FROM foo WHERE b = 'FOOBAR' ORDER BY id"); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[]).collect(); - let res = runtime.block_on(query).unwrap(); - - assert_eq!( - vec!["foobar".to_string(), "FooBar".to_string()], - res.iter() - .map(|row| row.get::<_, String>(0)) - .collect::>() - ); + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); + + let stmt = client + .prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)") + .await + .unwrap(); + client + .execute(&stmt, &[&"foobar", &"FooBar", &None::<&'static str>]) + .await + .unwrap(); + + let stmt = client + .prepare("SELECT b FROM foo WHERE b = 'FOOBAR' ORDER BY id") + .await + .unwrap(); + let rows = client + .query(&stmt, &[]) + .await + .unwrap() + .map_ok(|row| row.get(0)) + .try_collect::>() + .await + .unwrap(); + + assert_eq!(vec!["foobar".to_string(), "FooBar".to_string()], rows,); } -#[test] -fn test_bytea_params() { +#[tokio::test] +async fn test_bytea_params() { test_type( "BYTEA", - &[ + vec![ (Some(vec![0u8, 1, 2, 3, 254, 255]), "'\\x00010203feff'"), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_borrowed_bytea() { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); - - let prepare = client.prepare("SELECT 'foo'::BYTEA"); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[]).collect(); - let rows = runtime.block_on(query).unwrap(); +#[tokio::test] +async fn test_borrowed_bytea() { + let mut client = connect("user=postgres").await; + let stmt = client.prepare("SELECT 'foo'::BYTEA").await.unwrap(); + let rows = client + .query(&stmt, &[]) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); let s: &[u8] = rows[0].get(0); 
assert_eq!(s, b"foo"); } -#[test] -fn test_hstore_params() { - macro_rules! make_map { - ($($k:expr => $v:expr),+) => ({ - let mut map = HashMap::new(); - $(map.insert($k, $v);)+ - map - }) - } +macro_rules! make_map { + ($($k:expr => $v:expr),+) => ({ + let mut map = HashMap::new(); + $(map.insert($k, $v);)+ + map + }) +} + +#[tokio::test] +async fn test_hstore_params() { test_type( "hstore", - &[ + vec![ ( Some(make_map!("a".to_owned() => Some("1".to_owned()))), "'a=>1'", @@ -328,149 +356,149 @@ fn test_hstore_params() { ), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn test_array_params() { +#[tokio::test] +async fn test_array_params() { test_type( "integer[]", - &[ + vec![ (Some(vec![1i32, 2i32]), "ARRAY[1,2]"), (Some(vec![1i32]), "ARRAY[1]"), (Some(vec![]), "ARRAY[]"), (None, "NULL"), ], - ); + ) + .await; } #[allow(clippy::eq_op)] -fn test_nan_param(sql_type: &str) +async fn test_nan_param(sql_type: &str) where T: PartialEq + ToSql + FromSqlOwned, { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); - - let prepare = client.prepare(&format!("SELECT 'NaN'::{}", sql_type)); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[]).collect(); - let rows = runtime.block_on(query).unwrap(); + let mut client = connect("user=postgres").await; + + let stmt = client + .prepare(&format!("SELECT 'NaN'::{}", sql_type)) + .await + .unwrap(); + let rows = client + .query(&stmt, &[]) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); let val: T = rows[0].get(0); assert!(val != val); } -#[test] -fn test_f32_nan_param() { - test_nan_param::("REAL"); +#[tokio::test] +async fn test_f32_nan_param() { + test_nan_param::("REAL").await; } -#[test] -fn test_f64_nan_param() { - test_nan_param::("DOUBLE PRECISION"); +#[tokio::test] +async fn test_f64_nan_param() { + test_nan_param::("DOUBLE PRECISION").await; } -#[test] -fn test_pg_database_datname() { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); - - let prepare = client.prepare("SELECT datname FROM pg_database"); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[]).collect(); - let rows = runtime.block_on(query).unwrap(); +#[tokio::test] +async fn test_pg_database_datname() { + let mut client = connect("user=postgres").await; + let stmt = client + .prepare("SELECT datname FROM pg_database") + .await + .unwrap(); + let rows = client + .query(&stmt, &[]) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); assert_eq!(rows[0].get::<_, &str>(0), "postgres"); } -#[test] -fn test_slice() { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); +#[tokio::test] +async fn test_slice() { + let mut client = connect("user=postgres").await; - let batch = client - .simple_query( + let stmt = client + .prepare( "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY, - f TEXT - ); - - INSERT INTO foo(f) VALUES ('a'), ('b'), ('c'), ('d');", + id SERIAL PRIMARY KEY, + f TEXT + )", ) - 
.for_each(|_| Ok(())); - runtime.block_on(batch).unwrap(); - - let prepare = client.prepare("SELECT f FROM foo WHERE id = ANY($1)"); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); + + let stmt = client + .prepare("INSERT INTO foo (f) VALUES ('a'), ('b'), ('c'), ('d')") + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); + + let stmt = client + .prepare("SELECT f FROM foo WHERE id = ANY($1)") + .await + .unwrap(); + let rows = client .query(&stmt, &[&&[1i32, 3, 4][..]]) - .map(|r| r.get::<_, String>(0)) - .collect(); - let rows = runtime.block_on(query).unwrap(); + .await + .unwrap() + .map_ok(|r| r.get(0)) + .try_collect::>() + .await + .unwrap(); assert_eq!(vec!["a".to_owned(), "c".to_owned(), "d".to_owned()], rows); } -#[test] -fn test_slice_wrong_type() { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); +#[tokio::test] +async fn test_slice_wrong_type() { + let mut client = connect("user=postgres").await; - let batch = client - .simple_query( + let stmt = client + .prepare( "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY - )", + id SERIAL PRIMARY KEY + )", ) - .for_each(|_| Ok(())); - runtime.block_on(batch).unwrap(); - - let prepare = client.prepare("SELECT * FROM foo WHERE id = ANY($1)"); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[&&[&"hi"][..]]).collect(); - let err = runtime.block_on(query).err().unwrap(); + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); + + let stmt = client + .prepare("SELECT * FROM foo WHERE id = ANY($1)") + .await + .unwrap(); + let err = client.query(&stmt, &[&&[&"hi"][..]]).await.err().unwrap(); match err.source() { Some(e) if e.is::() => {} _ => panic!("Unexpected error {:?}", err), }; } -#[test] -fn test_slice_range() { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); +#[tokio::test] +async fn test_slice_range() { + let mut client = connect("user=postgres").await; - let prepare = client.prepare("SELECT $1::INT8RANGE"); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[&&[&1i64][..]]).collect(); - let err = runtime.block_on(query).err().unwrap(); + let stmt = client.prepare("SELECT $1::INT8RANGE").await.unwrap(); + let err = client.query(&stmt, &[&&[&1i64][..]]).await.err().unwrap(); match err.source() { Some(e) if e.is::() => {} _ => panic!("Unexpected error {:?}", err), }; } -#[test] -fn domain() { +#[tokio::test] +async fn domain() { #[derive(Debug, PartialEq)] struct SessionId(Vec); @@ -509,57 +537,56 @@ fn domain() { } } - let mut runtime = Runtime::new().unwrap(); + let mut client = connect("user=postgres").await; - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); + let stmt = client + .prepare("CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)") + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); - let batch = client - .simple_query( - "CREATE DOMAIN 
pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16); - CREATE TABLE pg_temp.foo (id pg_temp.session_id);", - ) - .for_each(|_| Ok(())); - runtime.block_on(batch).unwrap(); + let stmt = client + .prepare("CREATE TABLE pg_temp.foo (id pg_temp.session_id)") + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); let id = SessionId(b"0123456789abcdef".to_vec()); - let prepare = client.prepare("INSERT INTO pg_temp.foo (id) VALUES ($1)"); - let stmt = runtime.block_on(prepare).unwrap(); - let execute = client.execute(&stmt, &[&id]); - runtime.block_on(execute).unwrap(); - - let prepare = client.prepare("SELECT id FROM pg_temp.foo"); - let stmt = runtime.block_on(prepare).unwrap(); - let query = client.query(&stmt, &[]).collect(); - let rows = runtime.block_on(query).unwrap(); + let stmt = client + .prepare("INSERT INTO pg_temp.foo (id) VALUES ($1)") + .await + .unwrap(); + client.execute(&stmt, &[&id]).await.unwrap(); + + let stmt = client.prepare("SELECT id FROM pg_temp.foo").await.unwrap(); + let rows = client + .query(&stmt, &[]) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); assert_eq!(id, rows[0].get(0)); } -#[test] -fn composite() { - let mut runtime = Runtime::new().unwrap(); - - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); +#[tokio::test] +async fn composite() { + let mut client = connect("user=postgres").await; - let batch = client - .simple_query( + let stmt = client + .prepare( "CREATE TYPE pg_temp.inventory_item AS ( - name TEXT, - supplier INTEGER, - price NUMERIC - )", + name TEXT, + supplier INTEGER, + price NUMERIC + )", ) - .for_each(|_| Ok(())); - runtime.block_on(batch).unwrap(); + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); - let prepare = client.prepare("SELECT $1::inventory_item"); - let stmt = runtime.block_on(prepare).unwrap(); + let stmt = client.prepare("SELECT $1::inventory_item").await.unwrap(); let type_ = &stmt.params()[0]; assert_eq!(type_.name(), "inventory_item"); match *type_.kind() { @@ -575,22 +602,17 @@ fn composite() { } } -#[test] -fn enum_() { - let mut runtime = Runtime::new().unwrap(); +#[tokio::test] +async fn enum_() { + let mut client = connect("user=postgres").await; - let handshake = connect("user=postgres"); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); + let stmt = client + .prepare("CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy')") + .await + .unwrap(); + client.execute(&stmt, &[]).await.unwrap(); - let batch = client - .simple_query("CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy');") - .for_each(|_| Ok(())); - runtime.block_on(batch).unwrap(); - - let prepare = client.prepare("SELECT $1::mood"); - let stmt = runtime.block_on(prepare).unwrap(); + let stmt = client.prepare("SELECT $1::mood").await.unwrap(); let type_ = &stmt.params()[0]; assert_eq!(type_.name(), "mood"); match *type_.kind() { @@ -604,11 +626,11 @@ fn enum_() { } } -#[test] -fn system_time() { +#[tokio::test] +async fn system_time() { test_type( "TIMESTAMP", - &[ + vec![ ( Some(UNIX_EPOCH + Duration::from_millis(1_010)), "'1970-01-01 00:00:01.01'", @@ -623,14 +645,15 @@ fn system_time() { ), (None, "NULL"), ], - ); + ) + .await; } -#[test] -fn inet() { +#[tokio::test] +async fn inet() { test_type( "INET", - &[ + vec![ 
(Some("127.0.0.1".parse::().unwrap()), "'127.0.0.1'"), ( Some("127.0.0.1".parse::().unwrap()), @@ -653,5 +676,6 @@ fn inet() { "'2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128'", ), ], - ); + ) + .await; } diff --git a/tokio-postgres/tests/test/types/serde_json_1.rs b/tokio-postgres/tests/test/types/serde_json_1.rs index 37d431f6e..f48c188f2 100644 --- a/tokio-postgres/tests/test/types/serde_json_1.rs +++ b/tokio-postgres/tests/test/types/serde_json_1.rs @@ -2,11 +2,11 @@ use serde_json_1::Value; use crate::types::test_type; -#[test] -fn test_json_params() { +#[tokio::test] +async fn test_json_params() { test_type( "JSON", - &[ + vec![ ( Some(serde_json_1::from_str::("[10, 11, 12]").unwrap()), "'[10, 11, 12]'", @@ -18,13 +18,14 @@ fn test_json_params() { (None, "NULL"), ], ) + .await } -#[test] -fn test_jsonb_params() { +#[tokio::test] +async fn test_jsonb_params() { test_type( "JSONB", - &[ + vec![ ( Some(serde_json_1::from_str::("[10, 11, 12]").unwrap()), "'[10, 11, 12]'", @@ -36,4 +37,5 @@ fn test_jsonb_params() { (None, "NULL"), ], ) + .await } diff --git a/tokio-postgres/tests/test/types/uuid_07.rs b/tokio-postgres/tests/test/types/uuid_07.rs index 1d51a8ad9..8e60fe3a4 100644 --- a/tokio-postgres/tests/test/types/uuid_07.rs +++ b/tokio-postgres/tests/test/types/uuid_07.rs @@ -2,11 +2,11 @@ use uuid_07::Uuid; use crate::types::test_type; -#[test] -fn test_uuid_params() { +#[tokio::test] +async fn test_uuid_params() { test_type( "UUID", - &[ + vec![ ( Some(Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap()), "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", @@ -14,4 +14,5 @@ fn test_uuid_params() { (None, "NULL"), ], ) + .await } From e77b0fd0090c60812384f9610524b8b06b3b572f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 28 Jul 2019 15:52:46 -0700 Subject: [PATCH 197/819] Add simple_query and batch_execute --- tokio-postgres/src/client.rs | 67 +++++++++- tokio-postgres/src/lib.rs | 13 ++ tokio-postgres/src/prepare.rs | 6 +- tokio-postgres/src/query.rs | 1 - tokio-postgres/src/simple_query.rs | 133 +++++++++++++++++++ tokio-postgres/tests/test/runtime.rs | 8 +- tokio-postgres/tests/test/types/chrono_04.rs | 3 +- tokio-postgres/tests/test/types/mod.rs | 73 +++++----- 8 files changed, 251 insertions(+), 53 deletions(-) create mode 100644 tokio-postgres/src/simple_query.rs diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index e49c68da3..1eb60453a 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,12 +1,13 @@ use crate::codec::BackendMessages; use crate::connection::{Request, RequestMessages}; -use crate::prepare::prepare; use crate::query::{self, Query}; +use crate::simple_query; use crate::types::{Oid, ToSql, Type}; +use crate::{prepare, SimpleQueryMessage}; use crate::{Error, Statement}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::future; +use futures::{future, Stream}; use futures::{ready, StreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; @@ -131,18 +132,31 @@ impl Client { self.inner.clone() } + /// Creates a new prepared statement. + /// + /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), + /// which are set when executed. Prepared statements can only be used with the connection that created them. pub fn prepare(&mut self, query: &str) -> impl Future> { self.prepare_typed(query, &[]) } + /// Like `prepare`, but allows the types of query parameters to be explicitly specified. 
+ /// + /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be + /// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`. pub fn prepare_typed( &mut self, query: &str, parameter_types: &[Type], ) -> impl Future> { - prepare(self.inner(), query, parameter_types) + prepare::prepare(self.inner(), query, parameter_types) } + /// Executes a statement, returning a stream of the resulting rows. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. pub fn query( &mut self, statement: &Statement, @@ -152,6 +166,9 @@ impl Client { query::query(self.inner(), statement.clone(), buf) } + /// Like [`query`], but takes an iterator of parameters rather than a slice. + /// + /// [`query`]: #method.query pub fn query_iter<'a, I>( &mut self, statement: &Statement, @@ -165,6 +182,13 @@ impl Client { query::query(self.inner(), statement.clone(), buf) } + /// Executes a statement, returning the number of rows modified. + /// + /// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. pub fn execute( &mut self, statement: &Statement, @@ -174,6 +198,9 @@ impl Client { query::execute(self.inner(), buf) } + /// Like [`execute`], but takes an iterator of parameters rather than a slice. + /// + /// [`execute`]: #method.execute pub fn execute_iter<'a, I>( &mut self, statement: &Statement, @@ -186,4 +213,38 @@ impl Client { let buf = query::encode(statement, params); query::execute(self.inner(), buf) } + + /// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows. + /// + /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that + /// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings, + /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a stream over the + /// rows, this method returns a stream over an enum which indicates either the completion of one of the commands, + /// or a row of data. This preserves the framing between the separate statements in the request. + /// + /// # Warning + /// + /// Prepared statements should be used for any query which contains user-specified data, as they provide the + /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass + /// them to this method! + pub fn simple_query( + &mut self, + query: &str, + ) -> impl Stream> { + simple_query::simple_query(self.inner(), query) + } + + /// Executes a sequence of SQL statements using the simple query protocol. + /// + /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that + /// point. This is intended for use when, for example, initializing a database schema. + /// + /// # Warning + /// + /// Prepared statements should be used for any query which contains user-specified data, as they provide the + /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass + /// them to this method! 
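// The block below is a minimal usage sketch of the two methods documented above, mirroring the calls made by
// the tests added later in this patch series; the helper name, table, and statements are illustrative
// assumptions rather than part of this diff, and `futures::TryStreamExt` is assumed to be in scope for
// `try_collect`.
async fn simple_query_sketch(client: &mut Client) -> Result<(), Error> {
    // batch_execute runs several semicolon-separated statements and discards any returned rows.
    client
        .batch_execute("CREATE TEMPORARY TABLE sketch (id SERIAL, name TEXT)")
        .await?;

    // simple_query yields a stream of SimpleQueryMessage values: one CommandComplete per statement plus a
    // Row item per returned row, with row values delivered as strings.
    let messages = client
        .simple_query("INSERT INTO sketch (name) VALUES ('a'); SELECT * FROM sketch;")
        .try_collect::<Vec<_>>()
        .await?;
    assert!(!messages.is_empty());
    Ok(())
}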
+ pub fn batch_execute(&mut self, query: &str) -> impl Future> { + simple_query::batch_execute(self.inner(), query) + } } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 3a9e298ac..0b01d322d 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -140,6 +140,7 @@ mod maybe_tls_stream; mod prepare; mod query; pub mod row; +mod simple_query; #[cfg(feature = "runtime")] mod socket; mod statement; @@ -188,6 +189,18 @@ pub enum AsyncMessage { __NonExhaustive, } +/// Message returned by the `SimpleQuery` stream. +pub enum SimpleQueryMessage { + /// A row of data. + Row(SimpleQueryRow), + /// A statement in the query has completed. + /// + /// The number of rows modified or selected is returned. + CommandComplete(u64), + #[doc(hidden)] + __NonExhaustive, +} + impl Notification { /// The process ID of the notifying backend process. pub fn process_id(&self) -> i32 { diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 8cc880915..083e539c0 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -6,7 +6,7 @@ use crate::query; use crate::types::{Field, Kind, Oid, ToSql, Type}; use crate::{Column, Error, Statement}; use fallible_iterator::FallibleIterator; -use futures::{future, StreamExt, TryStreamExt}; +use futures::{future, TryStreamExt}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::future::Future; @@ -14,8 +14,6 @@ use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; -const TYPEINFO_STMT: &str = "typeinfo"; - const TYPEINFO_QUERY: &str = "\ SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid FROM pg_catalog.pg_type t @@ -62,7 +60,7 @@ pub fn prepare( client: Arc, query: &str, types: &[Type], -) -> impl Future> { +) -> impl Future> + 'static { let name = format!("s{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); let buf = encode(&name, query, types); diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index e9702f91a..1b6cf18bd 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -6,7 +6,6 @@ use crate::{Error, Row, Statement}; use futures::{ready, Stream}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; -use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs new file mode 100644 index 000000000..06891567c --- /dev/null +++ b/tokio-postgres/src/simple_query.rs @@ -0,0 +1,133 @@ +use crate::client::{InnerClient, Responses}; +use crate::codec::FrontendMessage; +use crate::connection::RequestMessages; +use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; +use fallible_iterator::FallibleIterator; +use futures::{ready, Stream}; +use postgres_protocol::message::backend::Message; +use postgres_protocol::message::frontend; +use std::future::Future; +use std::mem; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +pub fn simple_query( + client: Arc, + query: &str, +) -> impl Stream> { + let buf = encode(query); + SimpleQuery::Start { client, buf } +} + +pub fn batch_execute( + client: Arc, + query: &str, +) -> impl Future> { + let buf = encode(query); + + async move { + let buf = buf?; + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + loop { + match responses.next().await? 
{ + Message::ReadyForQuery(_) => return Ok(()), + Message::CommandComplete(_) + | Message::EmptyQueryResponse + | Message::RowDescription(_) + | Message::DataRow(_) => {} + _ => return Err(Error::unexpected_message()), + } + } + } +} + +fn encode(query: &str) -> Result, Error> { + let mut buf = vec![]; + frontend::query(query, &mut buf).map_err(Error::encode)?; + Ok(buf) +} + +enum SimpleQuery { + Start { + client: Arc, + buf: Result, Error>, + }, + Reading { + responses: Responses, + columns: Option>, + }, + Done, +} + +impl Stream for SimpleQuery { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match mem::replace(&mut *self, SimpleQuery::Done) { + SimpleQuery::Start { client, buf } => { + let buf = buf?; + let responses = + client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + *self = SimpleQuery::Reading { + responses, + columns: None, + }; + } + SimpleQuery::Reading { + mut responses, + columns, + } => match ready!(responses.poll_next(cx)?) { + Message::CommandComplete(body) => { + let rows = body + .tag() + .map_err(Error::parse)? + .rsplit(' ') + .next() + .unwrap() + .parse() + .unwrap_or(0); + *self = SimpleQuery::Reading { + responses, + columns: None, + }; + return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows)))); + } + Message::EmptyQueryResponse => { + *self = SimpleQuery::Reading { + responses, + columns: None, + }; + return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0)))); + } + Message::RowDescription(body) => { + let columns = body + .fields() + .map(|f| Ok(f.name().to_string())) + .collect::>() + .map_err(Error::parse)? + .into(); + *self = SimpleQuery::Reading { + responses, + columns: Some(columns), + }; + } + Message::DataRow(body) => { + let row = match &columns { + Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, + None => return Poll::Ready(Some(Err(Error::unexpected_message()))), + }; + *self = SimpleQuery::Reading { responses, columns }; + return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row)))); + } + Message::ReadyForQuery(_) => return Poll::Ready(None), + _ => return Poll::Ready(Some(Err(Error::unexpected_message()))), + }, + SimpleQuery::Done => return Poll::Ready(None), + } + } + } +} diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index b66f81fc7..d04b0153c 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -10,7 +10,13 @@ async fn smoke_test(s: &str) { tokio::spawn(connection); let stmt = client.prepare("SELECT $1::INT").await.unwrap(); - let rows = client.query(&stmt, &[&1i32]).await.unwrap().try_collect::>().await.unwrap(); + let rows = client + .query(&stmt, &[&1i32]) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); assert_eq!(rows[0].get::<_, i32>(0), 1i32); } diff --git a/tokio-postgres/tests/test/types/chrono_04.rs b/tokio-postgres/tests/test/types/chrono_04.rs index e36aeafa2..a48346bc4 100644 --- a/tokio-postgres/tests/test/types/chrono_04.rs +++ b/tokio-postgres/tests/test/types/chrono_04.rs @@ -19,7 +19,8 @@ async fn test_naive_date_time_params() { make_check("'2010-02-09 23:11:45.120200000'"), (None, "NULL"), ], - ).await; + ) + .await; } #[tokio::test] diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 8332a9595..e755b12e6 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -1,4 +1,4 @@ -use futures::{TryStreamExt}; +use 
futures::TryStreamExt; use std::collections::HashMap; use std::error::Error; use std::f32; @@ -225,8 +225,8 @@ async fn test_borrowed_text() { async fn test_bpchar_params() { let mut client = connect("user=postgres").await; - let stmt = client - .prepare( + client + .batch_execute( "CREATE TEMPORARY TABLE foo ( id SERIAL PRIMARY KEY, b CHAR(5) @@ -234,7 +234,6 @@ async fn test_bpchar_params() { ) .await .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); let stmt = client .prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)") @@ -268,8 +267,8 @@ async fn test_bpchar_params() { async fn test_citext_params() { let mut client = connect("user=postgres").await; - let stmt = client - .prepare( + client + .batch_execute( "CREATE TEMPORARY TABLE foo ( id SERIAL PRIMARY KEY, b CITEXT @@ -277,7 +276,6 @@ async fn test_citext_params() { ) .await .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); let stmt = client .prepare("INSERT INTO foo (b) VALUES ($1), ($2), ($3)") @@ -427,22 +425,16 @@ async fn test_pg_database_datname() { async fn test_slice() { let mut client = connect("user=postgres").await; - let stmt = client - .prepare( + client + .batch_execute( "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY, - f TEXT - )", + id SERIAL PRIMARY KEY, + f TEXT + ); + INSERT INTO foo (f) VALUES ('a'), ('b'), ('c'), ('d');", ) .await .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); - - let stmt = client - .prepare("INSERT INTO foo (f) VALUES ('a'), ('b'), ('c'), ('d')") - .await - .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); let stmt = client .prepare("SELECT f FROM foo WHERE id = ANY($1)") @@ -464,15 +456,14 @@ async fn test_slice() { async fn test_slice_wrong_type() { let mut client = connect("user=postgres").await; - let stmt = client - .prepare( + client + .batch_execute( "CREATE TEMPORARY TABLE foo ( - id SERIAL PRIMARY KEY - )", + id SERIAL PRIMARY KEY + )", ) .await .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); let stmt = client .prepare("SELECT * FROM foo WHERE id = ANY($1)") @@ -539,17 +530,15 @@ async fn domain() { let mut client = connect("user=postgres").await; - let stmt = client - .prepare("CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)") - .await - .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); - - let stmt = client - .prepare("CREATE TABLE pg_temp.foo (id pg_temp.session_id)") + client + .batch_execute( + " + CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16); + CREATE TABLE pg_temp.foo (id pg_temp.session_id); + ", + ) .await .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); let id = SessionId(b"0123456789abcdef".to_vec()); @@ -574,17 +563,16 @@ async fn domain() { async fn composite() { let mut client = connect("user=postgres").await; - let stmt = client - .prepare( + client + .batch_execute( "CREATE TYPE pg_temp.inventory_item AS ( - name TEXT, - supplier INTEGER, - price NUMERIC - )", + name TEXT, + supplier INTEGER, + price NUMERIC + )", ) .await .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); let stmt = client.prepare("SELECT $1::inventory_item").await.unwrap(); let type_ = &stmt.params()[0]; @@ -606,11 +594,10 @@ async fn composite() { async fn enum_() { let mut client = connect("user=postgres").await; - let stmt = client - .prepare("CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy')") + client + .batch_execute("CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy')") .await .unwrap(); - client.execute(&stmt, &[]).await.unwrap(); let stmt = client.prepare("SELECT 
$1::mood").await.unwrap(); let type_ = &stmt.params()[0]; From 07e5930ddb4cfba81da1c081b991c15c5391190f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 28 Jul 2019 16:02:34 -0700 Subject: [PATCH 198/819] Handle target_session_attrs --- tokio-postgres/src/connect.rs | 25 ++++++++++++++++++++++--- tokio-postgres/tests/test/runtime.rs | 7 +++++-- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 8bb234d0d..a17b45a9f 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -2,7 +2,9 @@ use crate::config::{Host, TargetSessionAttrs}; use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; -use crate::{Client, Config, Connection, Error, Socket}; +use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; +use futures::TryStreamExt; +use std::io; pub async fn connect( mut tls: T, @@ -50,10 +52,27 @@ where T: TlsConnect, { let socket = connect_socket(idx, config).await?; - let (client, connection) = connect_raw(socket, tls, config, Some(idx)).await?; + let (mut client, connection) = connect_raw(socket, tls, config, Some(idx)).await?; if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { - unimplemented!() + let mut rows = client.simple_query("SHOW transaction_read_only"); + + loop { + match rows.try_next().await? { + Some(SimpleQueryMessage::Row(row)) => { + if row.try_get(0)? == Some("on") { + return Err(Error::connect(io::Error::new( + io::ErrorKind::PermissionDenied, + "database does not allow writes", + ))); + } else { + break; + } + } + Some(_) => {} + None => return Err(Error::unexpected_message()), + } + } } Ok((client, connection)) diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index d04b0153c..e4038b0cc 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -49,7 +49,6 @@ async fn wrong_port_count() { .unwrap(); } -/* #[tokio::test] async fn target_session_attrs_ok() { tokio_postgres::connect( @@ -67,9 +66,13 @@ async fn target_session_attrs_err() { "host=localhost port=5433 user=postgres target_session_attrs=read-write options='-c default_transaction_read_only=on'", NoTls, - ).await.err().unwrap(); + ) + .await + .err() + .unwrap(); } +/* #[test] fn cancel_query() { let mut runtime = Runtime::new().unwrap(); From 9938ffff1abc76764021056631763584aa71ce87 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 28 Jul 2019 16:34:07 -0700 Subject: [PATCH 199/819] Test and fix simple_query --- tokio-postgres/Cargo.toml | 1 + tokio-postgres/src/connect.rs | 4 +- tokio-postgres/src/simple_query.rs | 119 ++++++++++--------------- tokio-postgres/tests/test/main.rs | 138 +++++++++++++---------------- 4 files changed, 115 insertions(+), 147 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 05d334353..5b8b99bee 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -37,6 +37,7 @@ futures-preview = { version = "0.3.0-alpha.17", features = ["nightly", "async-aw log = "0.4" parking_lot = "0.9" percent-encoding = "1.0" +pin-utils = "0.1.0-alpha.4" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } tokio = { git = "https://github.com/tokio-rs/tokio", default-features = false, features = ["io", "codec"] } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index a17b45a9f..33c8ed255 100644 --- 
a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -1,4 +1,5 @@ use crate::config::{Host, TargetSessionAttrs}; +use pin_utils::pin_mut; use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; @@ -55,7 +56,8 @@ where let (mut client, connection) = connect_raw(socket, tls, config, Some(idx)).await?; if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { - let mut rows = client.simple_query("SHOW transaction_read_only"); + let rows = client.simple_query("SHOW transaction_read_only"); + pin_mut!(rows); loop { match rows.try_next().await? { diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 06891567c..daab2077e 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -3,11 +3,10 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use fallible_iterator::FallibleIterator; -use futures::{ready, Stream}; +use futures::{ready, Stream, TryFutureExt}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::future::Future; -use std::mem; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; @@ -17,7 +16,18 @@ pub fn simple_query( query: &str, ) -> impl Stream> { let buf = encode(query); - SimpleQuery::Start { client, buf } + + let start = async move { + let buf = buf?; + let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + Ok(SimpleQuery { + responses, + columns: None, + }) + }; + + start.try_flatten_stream() } pub fn batch_execute( @@ -49,16 +59,9 @@ fn encode(query: &str) -> Result, Error> { Ok(buf) } -enum SimpleQuery { - Start { - client: Arc, - buf: Result, Error>, - }, - Reading { - responses: Responses, - columns: Option>, - }, - Done, +struct SimpleQuery { + responses: Responses, + columns: Option> } impl Stream for SimpleQuery { @@ -66,67 +69,39 @@ impl Stream for SimpleQuery { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { - match mem::replace(&mut *self, SimpleQuery::Done) { - SimpleQuery::Start { client, buf } => { - let buf = buf?; - let responses = - client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - - *self = SimpleQuery::Reading { - responses, - columns: None, + match ready!(self.responses.poll_next(cx)?) { + Message::CommandComplete(body) => { + let rows = body + .tag() + .map_err(Error::parse)? + .rsplit(' ') + .next() + .unwrap() + .parse() + .unwrap_or(0); + return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows)))); + } + Message::EmptyQueryResponse => { + return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0)))); + } + Message::RowDescription(body) => { + let columns = body + .fields() + .map(|f| Ok(f.name().to_string())) + .collect::>() + .map_err(Error::parse)? + .into(); + self.columns = Some(columns); + } + Message::DataRow(body) => { + let row = match &self.columns { + Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, + None => return Poll::Ready(Some(Err(Error::unexpected_message()))), }; + return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row)))); } - SimpleQuery::Reading { - mut responses, - columns, - } => match ready!(responses.poll_next(cx)?) { - Message::CommandComplete(body) => { - let rows = body - .tag() - .map_err(Error::parse)? 
- .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); - *self = SimpleQuery::Reading { - responses, - columns: None, - }; - return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows)))); - } - Message::EmptyQueryResponse => { - *self = SimpleQuery::Reading { - responses, - columns: None, - }; - return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0)))); - } - Message::RowDescription(body) => { - let columns = body - .fields() - .map(|f| Ok(f.name().to_string())) - .collect::>() - .map_err(Error::parse)? - .into(); - *self = SimpleQuery::Reading { - responses, - columns: Some(columns), - }; - } - Message::DataRow(body) => { - let row = match &columns { - Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, - None => return Poll::Ready(Some(Err(Error::unexpected_message()))), - }; - *self = SimpleQuery::Reading { responses, columns }; - return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row)))); - } - Message::ReadyForQuery(_) => return Poll::Ready(None), - _ => return Poll::Ready(Some(Err(Error::unexpected_message()))), - }, - SimpleQuery::Done => return Poll::Ready(None), + Message::ReadyForQuery(_) => return Poll::Ready(None), + _ => return Poll::Ready(Some(Err(Error::unexpected_message()))), } } } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 7bc398764..11bbf446c 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -6,7 +6,7 @@ use tokio::net::TcpStream; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::types::{Kind, Type}; -use tokio_postgres::{Client, Config, Connection, Error}; +use tokio_postgres::{Client, Config, Connection, Error, SimpleQueryMessage}; mod parse; #[cfg(feature = "runtime")] @@ -114,12 +114,10 @@ async fn pipelined_prepare() { async fn insert_select() { let mut client = connect("user=postgres").await; - let setup = client - .prepare("CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT)") + client + .batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT)") .await .unwrap(); - client.execute(&setup, &[]).await.unwrap(); - drop(setup); let insert = client.prepare("INSERT INTO foo (name) VALUES ($1), ($2)"); let select = client.prepare("SELECT id, name FROM foo ORDER BY id"); @@ -142,8 +140,8 @@ async fn insert_select() { async fn custom_enum() { let mut client = connect("user=postgres").await; - let create = client - .prepare( + client + .batch_execute( "CREATE TYPE pg_temp.mood AS ENUM ( 'sad', 'ok', @@ -152,7 +150,6 @@ async fn custom_enum() { ) .await .unwrap(); - client.execute(&create, &[]).await.unwrap(); let select = client.prepare("SELECT $1::mood").await.unwrap(); @@ -172,11 +169,10 @@ async fn custom_enum() { async fn custom_domain() { let mut client = connect("user=postgres").await; - let create = client - .prepare("CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)") + client + .batch_execute("CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)") .await .unwrap(); - client.execute(&create, &[]).await.unwrap(); let select = client.prepare("SELECT $1::session_id").await.unwrap(); @@ -206,17 +202,16 @@ async fn custom_array() { async fn custom_composite() { let mut client = connect("user=postgres").await; - let create = client - .prepare( + client + .batch_execute( "CREATE TYPE pg_temp.inventory_item AS ( - name TEXT, - supplier INTEGER, - price NUMERIC - )", + name TEXT, + supplier INTEGER, + price NUMERIC + )", ) .await .unwrap(); - 
client.execute(&create, &[]).await.unwrap(); let select = client.prepare("SELECT $1::inventory_item").await.unwrap(); @@ -239,16 +234,15 @@ async fn custom_composite() { async fn custom_range() { let mut client = connect("user=postgres").await; - let create = client - .prepare( + client + .batch_execute( "CREATE TYPE pg_temp.floatrange AS RANGE ( - subtype = float8, - subtype_diff = float8mi - )", + subtype = float8, + subtype_diff = float8mi + )", ) .await .unwrap(); - client.execute(&create, &[]).await.unwrap(); let select = client.prepare("SELECT $1::floatrange").await.unwrap(); @@ -257,6 +251,52 @@ async fn custom_range() { assert_eq!(&Kind::Range(Type::FLOAT8), ty.kind()); } +#[tokio::test] +async fn simple_query() { + let mut client = connect("user=postgres").await; + + let messages = client + .simple_query( + "CREATE TEMPORARY TABLE foo ( + id SERIAL, + name TEXT + ); + INSERT INTO foo (name) VALUES ('steven'), ('joe'); + SELECT * FROM foo ORDER BY id;", + ) + .try_collect::>() + .await + .unwrap(); + + match messages[0] { + SimpleQueryMessage::CommandComplete(0) => {} + _ => panic!("unexpected message"), + } + match messages[1] { + SimpleQueryMessage::CommandComplete(2) => {} + _ => panic!("unexpected message"), + } + match &messages[2] { + SimpleQueryMessage::Row(row) => { + assert_eq!(row.get(0), Some("1")); + assert_eq!(row.get(1), Some("steven")); + } + _ => panic!("unexpected message"), + } + match &messages[3] { + SimpleQueryMessage::Row(row) => { + assert_eq!(row.get(0), Some("2")); + assert_eq!(row.get(1), Some("joe")); + } + _ => panic!("unexpected message"), + } + match messages[4] { + SimpleQueryMessage::CommandComplete(2) => {} + _ => panic!("unexpected message"), + } + assert_eq!(messages.len(), 5); +} + /* #[test] fn query_portal() { @@ -675,56 +715,6 @@ fn transaction_builder_around_moved_client() { runtime.run().unwrap(); } -#[test] -fn simple_query() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - let f = client - .simple_query( - "CREATE TEMPORARY TABLE foo ( - id SERIAL, - name TEXT - ); - INSERT INTO foo (name) VALUES ('steven'), ('joe'); - SELECT * FROM foo ORDER BY id;", - ) - .collect(); - let messages = runtime.block_on(f).unwrap(); - - match messages[0] { - SimpleQueryMessage::CommandComplete(0) => {} - _ => panic!("unexpected message"), - } - match messages[1] { - SimpleQueryMessage::CommandComplete(2) => {} - _ => panic!("unexpected message"), - } - match &messages[2] { - SimpleQueryMessage::Row(row) => { - assert_eq!(row.get(0), Some("1")); - assert_eq!(row.get(1), Some("steven")); - } - _ => panic!("unexpected message"), - } - match &messages[3] { - SimpleQueryMessage::Row(row) => { - assert_eq!(row.get(0), Some("2")); - assert_eq!(row.get(1), Some("joe")); - } - _ => panic!("unexpected message"), - } - match messages[4] { - SimpleQueryMessage::CommandComplete(2) => {} - _ => panic!("unexpected message"), - } - assert_eq!(messages.len(), 5); -} - #[test] fn poll_idle_running() { struct DelayStream(Delay); From dea6c3ebc5e304206ee3157db1a4c6f17467e378 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 28 Jul 2019 17:48:32 -0700 Subject: [PATCH 200/819] Fix target_session_attrs handling --- tokio-postgres/src/client.rs | 8 +++--- tokio-postgres/src/connect.rs | 20 ++++++++++++--- tokio-postgres/src/prepare.rs | 6 ++--- 
tokio-postgres/src/query.rs | 20 +++++++-------- tokio-postgres/src/simple_query.rs | 2 +- tokio-postgres/tests/test/main.rs | 6 ++--- tokio-postgres/tests/test/runtime.rs | 5 +--- tokio-postgres/tests/test/types/mod.rs | 34 +++++++++----------------- 8 files changed, 49 insertions(+), 52 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 1eb60453a..909246e58 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,9 +1,9 @@ use crate::codec::BackendMessages; use crate::connection::{Request, RequestMessages}; -use crate::query::{self, Query}; -use crate::simple_query; +use crate::query; use crate::types::{Oid, ToSql, Type}; use crate::{prepare, SimpleQueryMessage}; +use crate::{simple_query, Row}; use crate::{Error, Statement}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; @@ -161,7 +161,7 @@ impl Client { &mut self, statement: &Statement, params: &[&dyn ToSql], - ) -> impl Future> { + ) -> impl Stream> { let buf = query::encode(statement, params.iter().cloned()); query::query(self.inner(), statement.clone(), buf) } @@ -173,7 +173,7 @@ impl Client { &mut self, statement: &Statement, params: I, - ) -> impl Future> + ) -> impl Stream> where I: IntoIterator, I::IntoIter: ExactSizeIterator, diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 33c8ed255..587f18578 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -1,11 +1,15 @@ use crate::config::{Host, TargetSessionAttrs}; -use pin_utils::pin_mut; use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; -use futures::TryStreamExt; +use futures::{Stream, FutureExt}; +use futures::future; +use pin_utils::pin_mut; use std::io; +use std::future::Future; +use std::task::Poll; +use std::pin::Pin; pub async fn connect( mut tls: T, @@ -53,14 +57,22 @@ where T: TlsConnect, { let socket = connect_socket(idx, config).await?; - let (mut client, connection) = connect_raw(socket, tls, config, Some(idx)).await?; + let (mut client, mut connection) = connect_raw(socket, tls, config, Some(idx)).await?; if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { let rows = client.simple_query("SHOW transaction_read_only"); pin_mut!(rows); loop { - match rows.try_next().await? { + let next = future::poll_fn(|cx| { + if connection.poll_unpin(cx)?.is_ready() { + return Poll::Ready(Some(Err(Error::closed()))); + } + + rows.as_mut().poll_next(cx) + }); + + match next.await.transpose()? { Some(SimpleQueryMessage::Row(row)) => { if row.try_get(0)? == Some("on") { return Err(Error::connect(io::Error::new( diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 083e539c0..8be147bb5 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -1,4 +1,5 @@ use crate::client::InnerClient; +use pin_utils::pin_mut; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::error::SqlState; @@ -127,7 +128,8 @@ async fn get_type(client: &Arc, oid: Oid) -> Result { let params: &[&dyn ToSql] = &[&oid]; let buf = query::encode(&stmt, params.iter().cloned()); - let mut rows = query::query(client.clone(), stmt, buf).await?; + let rows = query::query(client.clone(), stmt, buf); + pin_mut!(rows); let row = match rows.try_next().await? 
{ Some(row) => row, @@ -199,7 +201,6 @@ async fn get_enum_variants(client: &Arc, oid: Oid) -> Result, oid: Oid) -> Result>() .await?; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 1b6cf18bd..1f23c5eba 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -3,24 +3,24 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::{IsNull, ToSql}; use crate::{Error, Row, Statement}; -use futures::{ready, Stream}; +use futures::{ready, Stream, TryFutureExt}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -pub async fn query( +pub fn query( client: Arc, statement: Statement, buf: Result, Error>, -) -> Result { - let responses = start(client, buf).await?; - - Ok(Query { - statement, - responses, - }) +) -> impl Stream> { + start(client, buf) + .map_ok(|responses| Query { + statement, + responses, + }) + .try_flatten_stream() } pub async fn execute(client: Arc, buf: Result, Error>) -> Result { @@ -103,7 +103,7 @@ where Ok(buf) } -pub struct Query { +struct Query { statement: Statement, responses: Responses, } diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index daab2077e..383dad0ca 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -61,7 +61,7 @@ fn encode(query: &str) -> Result, Error> { struct SimpleQuery { responses: Responses, - columns: Option> + columns: Option>, } impl Stream for SimpleQuery { diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 11bbf446c..63012718a 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,7 +1,7 @@ #![warn(rust_2018_idioms)] #![feature(async_await)] -use futures::{try_join, FutureExt, TryFutureExt, TryStreamExt}; +use futures::{try_join, FutureExt, TryStreamExt}; use tokio::net::TcpStream; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; @@ -124,9 +124,7 @@ async fn insert_select() { let (insert, select) = try_join!(insert, select).unwrap(); let insert = client.execute(&insert, &[&"alice", &"bob"]); - let select = client - .query(&select, &[]) - .and_then(|q| q.try_collect::>()); + let select = client.query(&select, &[]).try_collect::>(); let (_, rows) = try_join!(insert, select).unwrap(); assert_eq!(rows.len(), 2); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index e4038b0cc..a11246a55 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -12,8 +12,6 @@ async fn smoke_test(s: &str) { let stmt = client.prepare("SELECT $1::INT").await.unwrap(); let rows = client .query(&stmt, &[&1i32]) - .await - .unwrap() .try_collect::>() .await .unwrap(); @@ -51,12 +49,11 @@ async fn wrong_port_count() { #[tokio::test] async fn target_session_attrs_ok() { - tokio_postgres::connect( + let _ = tokio_postgres::connect( "host=localhost port=5433 user=postgres target_session_attrs=read-write", NoTls, ) .await - .err() .unwrap(); } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index e755b12e6..46d451444 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -39,8 +39,6 @@ where .unwrap(); let rows = client .query(&stmt, &[]) - .await - .unwrap() .try_collect::>() .await .unwrap(); @@ -53,8 +51,6 @@ where .unwrap(); let 
rows = client .query(&stmt, &[&val]) - .await - .unwrap() .try_collect::>() .await .unwrap(); @@ -212,8 +208,6 @@ async fn test_borrowed_text() { let stmt = client.prepare("SELECT 'foo'").await.unwrap(); let rows = client .query(&stmt, &[]) - .await - .unwrap() .try_collect::>() .await .unwrap(); @@ -250,8 +244,6 @@ async fn test_bpchar_params() { .unwrap(); let rows = client .query(&stmt, &[]) - .await - .unwrap() .map_ok(|row| row.get(0)) .try_collect::>>() .await @@ -292,8 +284,6 @@ async fn test_citext_params() { .unwrap(); let rows = client .query(&stmt, &[]) - .await - .unwrap() .map_ok(|row| row.get(0)) .try_collect::>() .await @@ -320,8 +310,6 @@ async fn test_borrowed_bytea() { let stmt = client.prepare("SELECT 'foo'::BYTEA").await.unwrap(); let rows = client .query(&stmt, &[]) - .await - .unwrap() .try_collect::>() .await .unwrap(); @@ -385,8 +373,6 @@ where .unwrap(); let rows = client .query(&stmt, &[]) - .await - .unwrap() .try_collect::>() .await .unwrap(); @@ -413,8 +399,6 @@ async fn test_pg_database_datname() { .unwrap(); let rows = client .query(&stmt, &[]) - .await - .unwrap() .try_collect::>() .await .unwrap(); @@ -442,8 +426,6 @@ async fn test_slice() { .unwrap(); let rows = client .query(&stmt, &[&&[1i32, 3, 4][..]]) - .await - .unwrap() .map_ok(|r| r.get(0)) .try_collect::>() .await @@ -469,7 +451,12 @@ async fn test_slice_wrong_type() { .prepare("SELECT * FROM foo WHERE id = ANY($1)") .await .unwrap(); - let err = client.query(&stmt, &[&&[&"hi"][..]]).await.err().unwrap(); + let err = client + .query(&stmt, &[&&[&"hi"][..]]) + .try_collect::>() + .await + .err() + .unwrap(); match err.source() { Some(e) if e.is::() => {} _ => panic!("Unexpected error {:?}", err), @@ -481,7 +468,12 @@ async fn test_slice_range() { let mut client = connect("user=postgres").await; let stmt = client.prepare("SELECT $1::INT8RANGE").await.unwrap(); - let err = client.query(&stmt, &[&&[&1i64][..]]).await.err().unwrap(); + let err = client + .query(&stmt, &[&&[&1i64][..]]) + .try_collect::>() + .await + .err() + .unwrap(); match err.source() { Some(e) if e.is::() => {} _ => panic!("Unexpected error {:?}", err), @@ -551,8 +543,6 @@ async fn domain() { let stmt = client.prepare("SELECT id FROM pg_temp.foo").await.unwrap(); let rows = client .query(&stmt, &[]) - .await - .unwrap() .try_collect::>() .await .unwrap(); From 00ef0bfd84dc87a568fcd5c33a716b555e3bb35f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 28 Jul 2019 17:50:46 -0700 Subject: [PATCH 201/819] Use smoke_test for target_session_attrs_ok --- tokio-postgres/tests/test/runtime.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index a11246a55..cf26d7705 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -49,12 +49,7 @@ async fn wrong_port_count() { #[tokio::test] async fn target_session_attrs_ok() { - let _ = tokio_postgres::connect( - "host=localhost port=5433 user=postgres target_session_attrs=read-write", - NoTls, - ) - .await - .unwrap(); + smoke_test("host=localhost port=5433 user=postgres target_session_attrs=read-write").await; } #[tokio::test] From 6fac2186ce072b01fee3bd0abdd2ffdc49e476cd Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 29 Jul 2019 15:17:45 -0700 Subject: [PATCH 202/819] Fix Windows build? 
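UnixStream only exists on Unix-like targets, so the Unix variant of the socket enum has to be gated with #[cfg(unix)] for the crate to build on Windows. A minimal sketch of the pattern, not the crate's full Socket type (the corresponding import and any match arms over the variant need the same gate):

```rust
use tokio::net::TcpStream;
#[cfg(unix)]
use tokio::net::UnixStream;

// On Windows only the Tcp variant is compiled in; any code that matches on
// Inner::Unix must carry the same #[cfg(unix)] attribute.
enum Inner {
    Tcp(TcpStream),
    #[cfg(unix)]
    Unix(UnixStream),
}
```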
--- tokio-postgres/src/socket.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs index 74663cf67..d6e1b84d9 100644 --- a/tokio-postgres/src/socket.rs +++ b/tokio-postgres/src/socket.rs @@ -10,6 +10,7 @@ use tokio::net::UnixStream; #[derive(Debug)] enum Inner { Tcp(TcpStream), + #[cfg(unix)] Unix(UnixStream), } From 9d851f6727dd5aae1cee5bd202ab9368f2416312 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 29 Jul 2019 18:54:59 -0700 Subject: [PATCH 203/819] Fix doc examples --- tokio-postgres/src/lib.rs | 80 ++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 43 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 0b01d322d..15d4d1520 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -3,50 +3,41 @@ //! # Example //! //! ```no_run -//! use futures::{Future, Stream}; -//! use tokio_postgres::NoTls; +//! #![feature(async_await)] +//! +//! use futures::{FutureExt, TryStreamExt}; +//! use tokio_postgres::{NoTls, Error, Row}; //! -//! # #[cfg(not(feature = "runtime"))] -//! # let fut = futures::future::ok(()); //! # #[cfg(feature = "runtime")] -//! let fut = -//! // Connect to the database -//! tokio_postgres::connect("host=localhost user=postgres", NoTls) -//! -//! .map(|(client, connection)| { -//! // The connection object performs the actual communication with the database, -//! // so spawn it off to run on its own. -//! let connection = connection.map_err(|e| eprintln!("connection error: {}", e)); -//! tokio::spawn(connection); -//! -//! // The client is what you use to make requests. -//! client -//! }) -//! -//! .and_then(|mut client| { -//! // Now we can prepare a simple statement that just returns its parameter. -//! client.prepare("SELECT $1::TEXT") -//! .map(|statement| (client, statement)) -//! }) -//! -//! .and_then(|(mut client, statement)| { -//! // And then execute it, returning a Stream of Rows which we collect into a Vec -//! client.query(&statement, &[&"hello world"]).collect() -//! }) +//! #[tokio::main] +//! async fn main() -> Result<(), Error> { +//! // Connect to the database. +//! let (mut client, connection) = tokio_postgres::connect("host=localhost user=postgres", NoTls).await?; +//! +//! // The connection object performs the actual communication with the database, +//! // so spawn it off to run on its own. +//! let connection = connection.map(|r| { +//! if let Err(e) = r { +//! eprintln!("connection error: {}", e); +//! } +//! }); +//! tokio::spawn(connection); +//! +//! // Now we can prepare a simple statement that just returns its parameter. +//! let stmt = client.prepare("SELECT $1::TEXT").await?; +//! +//! // And then execute it, returning a Stream of Rows which we collect into a Vec. +//! let rows: Vec = client +//! .query(&stmt, &[&"hello world"]) +//! .try_collect() +//! .await?; //! //! // Now we can check that we got back the same string we sent over. -//! .map(|rows| { -//! let value: &str = rows[0].get(0); -//! assert_eq!(value, "hello world"); -//! }) -//! -//! // And report any errors that happened. -//! .map_err(|e| { -//! eprintln!("error: {}", e); -//! }); +//! let value: &str = rows[0].get(0); +//! assert_eq!(value, "hello world"); //! -//! // By default, tokio_postgres uses the tokio crate as its runtime. -//! tokio::run(fut); +//! Ok(()) +//! } //! ``` //! //! # Behavior @@ -84,15 +75,18 @@ //! combinator): //! //! ```rust -//! use futures::Future; +//! use futures::future; +//! use std::future::Future; //! 
use tokio_postgres::{Client, Error, Statement}; //! //! fn pipelined_prepare( //! client: &mut Client, -//! ) -> impl Future +//! ) -> impl Future> //! { -//! client.prepare("SELECT * FROM foo") -//! .join(client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)")) +//! future::try_join( +//! client.prepare("SELECT * FROM foo"), +//! client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)") +//! ) //! } //! ``` //! From 88399a790c4b5f1a470eba397d685d247de29111 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 29 Jul 2019 21:36:07 -0700 Subject: [PATCH 204/819] Cancel query support --- tokio-postgres/src/cancel_query.rs | 42 ++++++++++++++++ tokio-postgres/src/cancel_query_raw.rs | 34 +++++++++++++ tokio-postgres/src/client.rs | 67 +++++++++++++++++++++++++- tokio-postgres/src/config.rs | 2 +- tokio-postgres/src/connect.rs | 37 +++++++++++--- tokio-postgres/src/connect_raw.rs | 5 +- tokio-postgres/src/connect_socket.rs | 50 ++++++++++--------- tokio-postgres/src/lib.rs | 3 ++ tokio-postgres/src/prepare.rs | 2 +- tokio-postgres/tests/test/main.rs | 54 ++++++++------------- tokio-postgres/tests/test/runtime.rs | 51 ++++++++------------ 11 files changed, 244 insertions(+), 103 deletions(-) create mode 100644 tokio-postgres/src/cancel_query.rs create mode 100644 tokio-postgres/src/cancel_query_raw.rs diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs new file mode 100644 index 000000000..79c0468f7 --- /dev/null +++ b/tokio-postgres/src/cancel_query.rs @@ -0,0 +1,42 @@ +use crate::client::SocketConfig; +use crate::config::{SslMode, Host}; +use crate::{cancel_query_raw, connect_socket, connect_tls, Error, Socket}; +use std::io; +use crate::tls::MakeTlsConnect; + +pub(crate) async fn cancel_query( + config: Option, + ssl_mode: SslMode, + mut tls: T, + process_id: i32, + secret_key: i32, +) -> Result<(), Error> where T: MakeTlsConnect { + let config = match config { + Some(config) => config, + None => { + return Err(Error::connect(io::Error::new( + io::ErrorKind::InvalidInput, + "unknown host", + ))) + } + }; + + let hostname = match &config.host { + Host::Tcp(host) => &**host, + // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter + #[cfg(unix)] + Host::Unix(_) => "", + }; + let tls = tls.make_tls_connect(hostname).map_err(|e| Error::tls(e.into()))?; + + let socket = connect_socket::connect_socket( + &config.host, + config.port, + config.connect_timeout, + config.keepalives, + config.keepalives_idle, + ) + .await?; + + cancel_query_raw::cancel_query_raw(socket, ssl_mode, tls, process_id, secret_key).await +} diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs new file mode 100644 index 000000000..ecf85600b --- /dev/null +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -0,0 +1,34 @@ +use crate::config::SslMode; +use crate::tls::TlsConnect; +use crate::{connect_tls, Error}; +use futures::future; +use postgres_protocol::message::frontend; +use std::pin::Pin; +use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; + +pub async fn cancel_query_raw( + stream: S, + mode: SslMode, + tls: T, + process_id: i32, + secret_key: i32, +) -> Result<(), Error> +where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsConnect, +{ + let (mut stream, _) = connect_tls::connect_tls(stream, mode, tls).await?; + + let mut buf = vec![]; + frontend::cancel_request(process_id, secret_key, &mut buf); + + stream.write_all(&buf).await.map_err(Error::io)?; + future::poll_fn(|cx| Pin::new(&mut stream).poll_flush(cx)) + 
.await + .map_err(Error::io)?; + future::poll_fn(|cx| Pin::new(&mut stream).poll_shutdown(cx)) + .await + .map_err(Error::io)?; + + Ok(()) +} diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 909246e58..337bba1e8 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,7 +1,13 @@ use crate::codec::BackendMessages; +use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; -use crate::query; +#[cfg(feature = "runtime")] +use crate::tls::MakeTlsConnect; +use crate::tls::TlsConnect; use crate::types::{Oid, ToSql, Type}; +#[cfg(feature = "runtime")] +use crate::Socket; +use crate::{cancel_query, cancel_query_raw, query}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; @@ -15,6 +21,8 @@ use std::collections::HashMap; use std::future::Future; use std::sync::Arc; use std::task::{Context, Poll}; +use std::time::Duration; +use tokio::io::{AsyncRead, AsyncWrite}; pub struct Responses { receiver: mpsc::Receiver, @@ -101,8 +109,19 @@ impl InnerClient { } } +#[derive(Clone)] +pub(crate) struct SocketConfig { + pub host: Host, + pub port: u16, + pub connect_timeout: Option, + pub keepalives: bool, + pub keepalives_idle: Duration, +} + pub struct Client { inner: Arc, + socket_config: Option, + ssl_mode: SslMode, process_id: i32, secret_key: i32, } @@ -110,6 +129,7 @@ pub struct Client { impl Client { pub(crate) fn new( sender: mpsc::UnboundedSender, + ssl_mode: SslMode, process_id: i32, secret_key: i32, ) -> Client { @@ -123,6 +143,8 @@ impl Client { types: HashMap::new(), }), }), + socket_config: None, + ssl_mode, process_id, secret_key, } @@ -132,6 +154,10 @@ impl Client { self.inner.clone() } + pub(crate) fn set_socket_config(&mut self, socket_config: SocketConfig) { + self.socket_config = Some(socket_config); + } + /// Creates a new prepared statement. /// /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), @@ -247,4 +273,43 @@ impl Client { pub fn batch_execute(&mut self, query: &str) -> impl Future> { simple_query::batch_execute(self.inner(), query) } + + /// Attempts to cancel an in-progress query. + /// + /// The server provides no information about whether a cancellation attempt was successful or not. An error will + /// only be returned if the client was unable to connect to the database. + /// + /// Requires the `runtime` Cargo feature (enabled by default). + pub fn cancel_query(&mut self, tls: T) -> impl Future> + where + T: MakeTlsConnect, + { + cancel_query::cancel_query( + self.socket_config.clone(), + self.ssl_mode, + tls, + self.process_id, + self.secret_key, + ) + } + + /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new + /// connection itself. 
+ pub fn cancel_query_raw( + &mut self, + stream: S, + tls: T, + ) -> impl Future> + where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsConnect, + { + cancel_query_raw::cancel_query_raw( + stream, + self.ssl_mode, + tls, + self.process_id, + self.secret_key, + ) + } } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index de120ea96..68fe44596 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -396,7 +396,7 @@ impl Config { S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - connect_raw(stream, tls, self, None).await + connect_raw(stream, tls, self).await } } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 587f18578..86e4532d5 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -1,15 +1,16 @@ +use crate::client::SocketConfig; use crate::config::{Host, TargetSessionAttrs}; use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; -use futures::{Stream, FutureExt}; use futures::future; +use futures::{FutureExt, Stream}; use pin_utils::pin_mut; -use std::io; use std::future::Future; -use std::task::Poll; +use std::io; use std::pin::Pin; +use std::task::Poll; pub async fn connect( mut tls: T, @@ -28,6 +29,12 @@ where let mut error = None; for (i, host) in config.host.iter().enumerate() { + let port = *config + .port + .get(i) + .or_else(|| config.port.get(0)) + .unwrap_or(&5432); + let hostname = match host { Host::Tcp(host) => &**host, // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter @@ -39,7 +46,7 @@ where .make_tls_connect(hostname) .map_err(|e| Error::tls(e.into()))?; - match connect_once(i, tls, config).await { + match connect_once(host, port, tls, config).await { Ok((client, connection)) => return Ok((client, connection)), Err(e) => error = Some(e), } @@ -49,15 +56,23 @@ where } async fn connect_once( - idx: usize, + host: &Host, + port: u16, tls: T, config: &Config, ) -> Result<(Client, Connection), Error> where T: TlsConnect, { - let socket = connect_socket(idx, config).await?; - let (mut client, mut connection) = connect_raw(socket, tls, config, Some(idx)).await?; + let socket = connect_socket( + host, + port, + config.connect_timeout, + config.keepalives, + config.keepalives_idle, + ) + .await?; + let (mut client, mut connection) = connect_raw(socket, tls, config).await?; if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { let rows = client.simple_query("SHOW transaction_read_only"); @@ -89,5 +104,13 @@ where } } + client.set_socket_config(SocketConfig { + host: host.clone(), + port, + connect_timeout: config.connect_timeout, + keepalives: config.keepalives, + keepalives_idle: config.keepalives_idle, + }); + Ok((client, connection)) } diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 498658e19..a8f404dde 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -1,5 +1,5 @@ use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; -use crate::config::Config; +use crate::config::{Config, SslMode}; use crate::connect_tls::connect_tls; use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::{ChannelBinding, TlsConnect}; @@ -80,7 +80,6 @@ pub async fn connect_raw( stream: S, tls: T, config: &Config, - idx: Option, ) -> Result<(Client, Connection), Error> where S: AsyncRead + 
AsyncWrite + Unpin, @@ -98,7 +97,7 @@ where let (process_id, secret_key, parameters) = read_info(&mut stream).await?; let (sender, receiver) = mpsc::unbounded(); - let client = Client::new(sender, process_id, secret_key); + let client = Client::new(sender, config.ssl_mode, process_id, secret_key); let connection = Connection::new(stream.inner, parameters, receiver); Ok((client, connection)) diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index bcc00ed04..01643ae63 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -3,19 +3,20 @@ use crate::{Config, Error, Socket}; use std::future::Future; use std::io; use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; +use std::time::Duration; use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; use tokio::timer::Timeout; -pub async fn connect_socket(idx: usize, config: &Config) -> Result { - let port = *config - .port - .get(idx) - .or_else(|| config.port.get(0)) - .unwrap_or(&5432); - - match &config.host[idx] { +pub(crate) async fn connect_socket( + host: &Host, + port: u16, + connect_timeout: Option, + keepalives: bool, + keepalives_idle: Duration, +) -> Result { + match host { Host::Tcp(host) => { let addrs = match host.parse::() { Ok(ip) => { @@ -30,19 +31,20 @@ pub async fn connect_socket(idx: usize, config: &Config) -> Result { - socket.set_nodelay(true).map_err(Error::connect)?; - if config.keepalives { - socket - .set_keepalive(Some(config.keepalives_idle)) - .map_err(Error::connect)?; - } + let new_error = + match connect_with_timeout(TcpStream::connect(&addr), connect_timeout).await { + Ok(socket) => { + socket.set_nodelay(true).map_err(Error::connect)?; + if keepalives { + socket + .set_keepalive(Some(keepalives_idle)) + .map_err(Error::connect)?; + } - return Ok(Socket::new_tcp(socket)); - } - Err(e) => e, - }; + return Ok(Socket::new_tcp(socket)); + } + Err(e) => e, + }; error = Some(new_error); } @@ -56,18 +58,18 @@ pub async fn connect_socket(idx: usize, config: &Config) -> Result { - let socket = connect_timeout(UnixStream::connect(path), config).await?; + let socket = connect_with_timeout(UnixStream::connect(path), connect_timeout).await?; Ok(Socket::new_unix(socket)) } } } -async fn connect_timeout(connect: F, config: &Config) -> Result +async fn connect_with_timeout(connect: F, timeout: Option) -> Result where F: Future>, { - match config.connect_timeout { - Some(connect_timeout) => match Timeout::new(connect, connect_timeout).await { + match timeout { + Some(timeout) => match Timeout::new(connect, timeout).await { Ok(Ok(socket)) => Ok(socket), Ok(Err(e)) => Err(Error::connect(e)), Err(_) => Err(Error::connect(io::Error::new( diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 15d4d1520..0e75cd975 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -119,6 +119,9 @@ use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; pub use statement::{Column, Statement}; +#[cfg(feature = "runtime")] +mod cancel_query; +mod cancel_query_raw; mod client; mod codec; pub mod config; diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 8be147bb5..851673cad 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -1,5 +1,4 @@ use crate::client::InnerClient; -use pin_utils::pin_mut; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::error::SqlState; @@ -8,6 +7,7 @@ use crate::types::{Field, Kind, Oid, ToSql, Type}; use 
crate::{Column, Error, Statement}; use fallible_iterator::FallibleIterator; use futures::{future, TryStreamExt}; +use pin_utils::pin_mut; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::future::Future; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 63012718a..ce39f5d2a 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,12 +1,14 @@ #![warn(rust_2018_idioms)] #![feature(async_await)] -use futures::{try_join, FutureExt, TryStreamExt}; +use futures::{join, try_join, FutureExt, TryStreamExt}; use tokio::net::TcpStream; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::types::{Kind, Type}; use tokio_postgres::{Client, Config, Connection, Error, SimpleQueryMessage}; +use tokio::timer::Delay; +use std::time::{Duration, Instant}; mod parse; #[cfg(feature = "runtime")] @@ -295,6 +297,22 @@ async fn simple_query() { assert_eq!(messages.len(), 5); } +#[tokio::test] +async fn cancel_query_raw() { + let mut client = connect("user=postgres").await; + + let socket = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()).await.unwrap(); + let cancel = client.cancel_query_raw(socket, NoTls); + let cancel = Delay::new(Instant::now() + Duration::from_millis(100)).then(|()| cancel); + + let sleep = client.batch_execute("SELECT pg_sleep(100)"); + + match join!(sleep, cancel) { + (Err(ref e), Ok(())) if e.code() == Some(&SqlState::QUERY_CANCELED) => {} + t => panic!("unexpected return: {:?}", t), + } +} + /* #[test] fn query_portal() { @@ -340,40 +358,6 @@ fn query_portal() { assert_eq!(r3.len(), 0); } -#[test] -fn cancel_query_raw() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - let sleep = client - .simple_query("SELECT pg_sleep(100)") - .for_each(|_| Ok(())) - .then(|r| match r { - Ok(_) => panic!("unexpected success"), - Err(ref e) if e.code() == Some(&SqlState::QUERY_CANCELED) => Ok::<(), ()>(()), - Err(e) => panic!("unexpected error {}", e), - }); - let cancel = Delay::new(Instant::now() + Duration::from_millis(100)) - .then(|r| { - r.unwrap(); - TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - }) - .then(|r| { - let s = r.unwrap(); - client.cancel_query_raw(s, NoTls) - }) - .then(|r| { - r.unwrap(); - Ok::<(), ()>(()) - }); - - let ((), ()) = runtime.block_on(sleep.join(cancel)).unwrap(); -} - #[test] fn notifications() { let _ = env_logger::try_init(); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index cf26d7705..ffc3df173 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,14 +1,20 @@ -use futures::{FutureExt, TryStreamExt}; +use futures::{FutureExt, TryStreamExt, join}; use std::time::{Duration, Instant}; use tokio::timer::Delay; use tokio_postgres::error::SqlState; -use tokio_postgres::NoTls; +use tokio_postgres::{NoTls, Client}; -async fn smoke_test(s: &str) { - let (mut client, connection) = tokio_postgres::connect(s, NoTls).await.unwrap(); +async fn connect(s: &str) -> Client { + let (client, connection) = tokio_postgres::connect(s, NoTls).await.unwrap(); let connection = connection.map(|e| e.unwrap()); tokio::spawn(connection); + client +} + +async fn smoke_test(s: &str) { + let mut 
client = connect(s).await; + let stmt = client.prepare("SELECT $1::INT").await.unwrap(); let rows = client .query(&stmt, &[&1i32]) @@ -64,34 +70,17 @@ async fn target_session_attrs_err() { .unwrap(); } -/* -#[test] -fn cancel_query() { - let mut runtime = Runtime::new().unwrap(); +#[tokio::test] +async fn cancel_query() { + let mut client = connect("host=localhost port=5433 user=postgres").await; - let connect = tokio_postgres::connect("host=localhost port=5433 user=postgres", NoTls); - let (mut client, connection) = runtime.block_on(connect).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); + let cancel = client.cancel_query(NoTls); + let cancel = Delay::new(Instant::now() + Duration::from_millis(100)).then(|()| cancel); - let sleep = client - .simple_query("SELECT pg_sleep(100)") - .for_each(|_| Ok(())) - .then(|r| match r { - Ok(_) => panic!("unexpected success"), - Err(ref e) if e.code() == Some(&SqlState::QUERY_CANCELED) => Ok::<(), ()>(()), - Err(e) => panic!("unexpected error {}", e), - }); - let cancel = Delay::new(Instant::now() + Duration::from_millis(100)) - .then(|r| { - r.unwrap(); - client.cancel_query(NoTls) - }) - .then(|r| { - r.unwrap(); - Ok::<(), ()>(()) - }); + let sleep = client.batch_execute("SELECT pg_sleep(100)"); - let ((), ()) = runtime.block_on(sleep.join(cancel)).unwrap(); + match join!(sleep, cancel) { + (Err(ref e), Ok(())) if e.code() == Some(&SqlState::QUERY_CANCELED) => {} + t => panic!("unexpected return: {:?}", t), + } } -*/ From 4afd5235db796aaeb417855ee1f766cd1cda8af9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 30 Jul 2019 19:54:30 -0700 Subject: [PATCH 205/819] Transaction support --- tokio-postgres/src/client.rs | 11 +- tokio-postgres/src/lib.rs | 2 + tokio-postgres/src/transaction.rs | 157 ++++++++++++++++++++++++ tokio-postgres/tests/test/main.rs | 193 ++++++++++-------------------- 4 files changed, 233 insertions(+), 130 deletions(-) create mode 100644 tokio-postgres/src/transaction.rs diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 337bba1e8..a735e43e7 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -7,7 +7,7 @@ use crate::tls::TlsConnect; use crate::types::{Oid, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; -use crate::{cancel_query, cancel_query_raw, query}; +use crate::{cancel_query, cancel_query_raw, query, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; @@ -274,12 +274,21 @@ impl Client { simple_query::batch_execute(self.inner(), query) } + /// Begins a new database transaction. + /// + /// The transaction will roll back by default - use the `commit` method to commit it. + pub async fn transaction(&mut self) -> Result, Error> { + self.batch_execute("BEGIN").await?; + Ok(Transaction::new(self)) + } + /// Attempts to cancel an in-progress query. /// /// The server provides no information about whether a cancellation attempt was successful or not. An error will /// only be returned if the client was unable to connect to the database. /// /// Requires the `runtime` Cargo feature (enabled by default). 
+ #[cfg(feature = "runtime")] pub fn cancel_query(&mut self, tls: T) -> impl Future> where T: MakeTlsConnect, diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 0e75cd975..363200485 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -108,6 +108,7 @@ pub use crate::client::Client; pub use crate::config::Config; +pub use crate::transaction::Transaction; pub use crate::connection::Connection; use crate::error::DbError; pub use crate::error::Error; @@ -141,6 +142,7 @@ mod simple_query; #[cfg(feature = "runtime")] mod socket; mod statement; +mod transaction; pub mod tls; pub mod types; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs new file mode 100644 index 000000000..01853c6d5 --- /dev/null +++ b/tokio-postgres/src/transaction.rs @@ -0,0 +1,157 @@ +use crate::codec::FrontendMessage; +use crate::connection::RequestMessages; +#[cfg(feature = "runtime")] +use crate::tls::MakeTlsConnect; +use crate::tls::TlsConnect; +use crate::types::{ToSql, Type}; +#[cfg(feature = "runtime")] +use crate::Socket; +use crate::{query, Client, Error, Row, SimpleQueryMessage, Statement}; +use futures::Stream; +use postgres_protocol::message::frontend; +use std::future::Future; +use tokio::io::{AsyncRead, AsyncWrite}; + +/// A representation of a PostgreSQL database transaction. +/// +/// Transactions will implicitly roll back when dropped. Use the `commit` method to commit the changes made in the +/// transaction. Transactions can be nested, with inner transactions implemented via safepoints. +pub struct Transaction<'a> { + client: &'a mut Client, + done: bool, +} + +impl<'a> Drop for Transaction<'a> { + fn drop(&mut self) { + if self.done { + return; + } + + let mut buf = vec![]; + frontend::query("ROLLBACK", &mut buf).unwrap(); + let _ = self + .client + .inner() + .send(RequestMessages::Single(FrontendMessage::Raw(buf))); + } +} + +impl<'a> Transaction<'a> { + pub(crate) fn new(client: &'a mut Client) -> Transaction<'a> { + Transaction { + client, + done: false, + } + } + + /// Consumes the transaction, committing all changes made within it. + pub async fn commit(mut self) -> Result<(), Error> { + self.done = true; + self.client.batch_execute("COMMIT").await + } + + /// Rolls the transaction back, discarding all changes made within it. + /// + /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. + pub async fn rollback(mut self) -> Result<(), Error> { + self.done = true; + self.client.batch_execute("ROLLBACK").await + } + + /// Like `Client::prepare`. + pub fn prepare(&mut self, query: &str) -> impl Future> { + self.client.prepare(query) + } + + /// Like `Client::prepare_typed`. + pub fn prepare_typed( + &mut self, + query: &str, + parameter_types: &[Type], + ) -> impl Future> { + self.client.prepare_typed(query, parameter_types) + } + + /// Like `Client::query`. + pub fn query( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + ) -> impl Stream> { + self.client.query(statement, params) + } + + /// Like `Client::query_iter`. + pub fn query_iter<'b, I>( + &mut self, + statement: &Statement, + params: I, + ) -> impl Stream> + 'static + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + // https://github.com/rust-lang/rust/issues/63032 + let buf = query::encode(statement, params); + query::query(self.client.inner(), statement.clone(), buf) + } + + /// Like `Client::execute`. 
+ pub fn execute( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + ) -> impl Future> { + self.client.execute(statement, params) + } + + /// Like `Client::execute_iter`. + pub fn execute_iter<'b, I>( + &mut self, + statement: &Statement, + params: I, + ) -> impl Future> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + // https://github.com/rust-lang/rust/issues/63032 + let buf = query::encode(statement, params); + query::execute(self.client.inner(), buf) + } + + /// Like `Client::simple_query`. + pub fn simple_query( + &mut self, + query: &str, + ) -> impl Stream> { + self.client.simple_query(query) + } + + /// Like `Client::batch_execute`. + pub fn batch_execute(&mut self, query: &str) -> impl Future> { + self.client.batch_execute(query) + } + + /// Like `Client::cancel_query`. + #[cfg(feature = "runtime")] + pub fn cancel_query(&mut self, tls: T) -> impl Future> + where + T: MakeTlsConnect, + { + self.client.cancel_query(tls) + } + + /// Like `Client::cancel_query_raw`. + pub fn cancel_query_raw( + &mut self, + stream: S, + tls: T, + ) -> impl Future> + where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsConnect, + { + self.client.cancel_query_raw(stream, tls) + } +} diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index ce39f5d2a..8c246bad5 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -313,6 +313,70 @@ async fn cancel_query_raw() { } } +#[tokio::test] +async fn transaction_commit() { + let mut client = connect("user=postgres").await; + + client.batch_execute( + "CREATE TEMPORARY TABLE foo( + id SERIAL, + name TEXT + )", + ).await.unwrap(); + + let mut transaction = client.transaction().await.unwrap(); + transaction.batch_execute("INSERT INTO foo (name) VALUES ('steven')").await.unwrap(); + transaction.commit().await.unwrap(); + + let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); + let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); + + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, &str>(0), "steven"); +} + +#[tokio::test] +async fn transaction_rollback() { + let mut client = connect("user=postgres").await; + + client.batch_execute( + "CREATE TEMPORARY TABLE foo( + id SERIAL, + name TEXT + )", + ).await.unwrap(); + + let mut transaction = client.transaction().await.unwrap(); + transaction.batch_execute("INSERT INTO foo (name) VALUES ('steven')").await.unwrap(); + transaction.rollback().await.unwrap(); + + let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); + let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); + + assert_eq!(rows.len(), 0); +} + +#[tokio::test] +async fn transaction_rollback_drop() { + let mut client = connect("user=postgres").await; + + client.batch_execute( + "CREATE TEMPORARY TABLE foo( + id SERIAL, + name TEXT + )", + ).await.unwrap(); + + let mut transaction = client.transaction().await.unwrap(); + transaction.batch_execute("INSERT INTO foo (name) VALUES ('steven')").await.unwrap(); + drop(transaction); + + let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); + let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); + + assert_eq!(rows.len(), 0); +} + /* #[test] fn query_portal() { @@ -402,89 +466,6 @@ fn notifications() { assert_eq!(notifications[1].payload(), "world"); } -#[test] -fn transaction_commit() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = 
runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "CREATE TEMPORARY TABLE foo ( - id SERIAL, - name TEXT - )", - ) - .for_each(|_| Ok(())), - ) - .unwrap(); - - let f = client - .simple_query("INSERT INTO foo (name) VALUES ('steven')") - .for_each(|_| Ok(())); - runtime - .block_on(client.build_transaction().build(f)) - .unwrap(); - - let rows = runtime - .block_on( - client - .prepare("SELECT name FROM foo") - .and_then(|s| client.query(&s, &[]).collect()), - ) - .unwrap(); - - assert_eq!(rows.len(), 1); - assert_eq!(rows[0].get::<_, &str>(0), "steven"); -} - -#[test] -fn transaction_abort() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "CREATE TEMPORARY TABLE foo ( - id SERIAL, - name TEXT - )", - ) - .for_each(|_| Ok(())), - ) - .unwrap(); - - let f = client - .simple_query("INSERT INTO foo (name) VALUES ('steven')") - .for_each(|_| Ok(())) - .map_err(|e| Box::new(e) as Box) - .and_then(|_| Err::<(), _>(Box::::from(""))); - runtime - .block_on(client.build_transaction().build(f)) - .unwrap_err(); - - let rows = runtime - .block_on( - client - .prepare("SELECT name FROM foo") - .and_then(|s| client.query(&s, &[]).collect()), - ) - .unwrap(); - - assert_eq!(rows.len(), 0); -} - #[test] fn copy_in() { let _ = env_logger::try_init(); @@ -651,52 +632,6 @@ fn copy_out() { assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); } -#[test] -fn transaction_builder_around_moved_client() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - let transaction_builder = client.build_transaction(); - let work = client - .simple_query( - "CREATE TEMPORARY TABLE transaction_foo ( - id SERIAL, - name TEXT - )", - ) - .for_each(|_| Ok(())) - .and_then(move |_| { - client - .prepare("INSERT INTO transaction_foo (name) VALUES ($1), ($2)") - .map(|statement| (client, statement)) - }) - .and_then(|(mut client, statement)| { - client - .query(&statement, &[&"jim", &"joe"]) - .collect() - .map(|_res| client) - }); - - let transaction = transaction_builder.build(work); - let mut client = runtime.block_on(transaction).unwrap(); - - let data = runtime - .block_on( - client - .prepare("COPY transaction_foo TO STDOUT") - .and_then(|s| client.copy_out(&s, &[]).concat2()), - ) - .unwrap(); - assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); - - drop(client); - runtime.run().unwrap(); -} - #[test] fn poll_idle_running() { struct DelayStream(Delay); From f45884711f4b06d44fc28768a98873f064746688 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 30 Jul 2019 21:25:30 -0700 Subject: [PATCH 206/819] Support copy_in --- tokio-postgres/src/cancel_query.rs | 13 +- tokio-postgres/src/client.rs | 30 ++- tokio-postgres/src/connection.rs | 20 ++ tokio-postgres/src/copy_in.rs | 155 +++++++++++++++ tokio-postgres/src/lib.rs | 10 +- tokio-postgres/src/transaction.rs | 20 +- tokio-postgres/tests/test/main.rs | 276 ++++++++++++--------------- tokio-postgres/tests/test/runtime.rs | 4 +- 8 
files changed, 365 insertions(+), 163 deletions(-) create mode 100644 tokio-postgres/src/copy_in.rs diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index 79c0468f7..ebfea91a1 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -1,8 +1,8 @@ use crate::client::SocketConfig; -use crate::config::{SslMode, Host}; +use crate::config::{Host, SslMode}; +use crate::tls::MakeTlsConnect; use crate::{cancel_query_raw, connect_socket, connect_tls, Error, Socket}; use std::io; -use crate::tls::MakeTlsConnect; pub(crate) async fn cancel_query( config: Option, @@ -10,7 +10,10 @@ pub(crate) async fn cancel_query( mut tls: T, process_id: i32, secret_key: i32, -) -> Result<(), Error> where T: MakeTlsConnect { +) -> Result<(), Error> +where + T: MakeTlsConnect, +{ let config = match config { Some(config) => config, None => { @@ -27,7 +30,9 @@ pub(crate) async fn cancel_query( #[cfg(unix)] Host::Unix(_) => "", }; - let tls = tls.make_tls_connect(hostname).map_err(|e| Error::tls(e.into()))?; + let tls = tls + .make_tls_connect(hostname) + .map_err(|e| Error::tls(e.into()))?; let socket = connect_socket::connect_socket( &config.host, diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index a735e43e7..6971228fa 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -7,17 +7,19 @@ use crate::tls::TlsConnect; use crate::types::{Oid, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; -use crate::{cancel_query, cancel_query_raw, query, Transaction}; +use crate::{cancel_query, cancel_query_raw, copy_in, query, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; +use bytes::IntoBuf; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::{future, Stream}; +use futures::{future, Stream, TryStream}; use futures::{ready, StreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; use std::collections::HashMap; +use std::error; use std::future::Future; use std::sync::Arc; use std::task::{Context, Poll}; @@ -240,6 +242,30 @@ impl Client { query::execute(self.inner(), buf) } + /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. + /// + /// The data in the provided stream is passed along to the server verbatim; it is the caller's responsibility to + /// ensure it uses the proper format. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. + pub fn copy_in( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + stream: S, + ) -> impl Future> + where + S: TryStream, + S::Ok: IntoBuf, + ::Buf: 'static + Send, + S::Error: Into>, + { + let buf = query::encode(statement, params.iter().cloned()); + copy_in::copy_in(self.inner(), buf, stream) + } + /// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows. /// /// Statements should be separated by semicolons. 
If an error occurs, execution of the sequence will stop at that diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index cbbec2d1e..b7b5fa170 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -1,4 +1,5 @@ use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; +use crate::copy_in::CopyInReceiver; use crate::error::DbError; use crate::maybe_tls_stream::MaybeTlsStream; use crate::{AsyncMessage, Error, Notification}; @@ -17,6 +18,7 @@ use tokio::io::{AsyncRead, AsyncWrite}; pub enum RequestMessages { Single(FrontendMessage), + CopyIn(CopyInReceiver), } pub struct Request { @@ -237,6 +239,24 @@ where self.state = State::Closing; } } + RequestMessages::CopyIn(mut receiver) => { + let message = match receiver.poll_next_unpin(cx) { + Poll::Ready(Some(message)) => message, + Poll::Ready(None) => { + trace!("poll_write: finished copy_in request"); + continue; + } + Poll::Pending => { + trace!("poll_write: waiting on copy_in stream"); + self.pending_request = Some(RequestMessages::CopyIn(receiver)); + return Ok(true); + } + }; + Pin::new(&mut self.stream) + .start_send(message) + .map_err(Error::io)?; + self.pending_request = Some(RequestMessages::CopyIn(receiver)); + } } } } diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs new file mode 100644 index 000000000..dad0bfad5 --- /dev/null +++ b/tokio-postgres/src/copy_in.rs @@ -0,0 +1,155 @@ +use crate::client::InnerClient; +use crate::codec::FrontendMessage; +use crate::connection::RequestMessages; +use crate::Error; +use bytes::{Buf, BufMut, BytesMut, IntoBuf}; +use futures::channel::mpsc; +use futures::ready; +use futures::{SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; +use pin_utils::pin_mut; +use postgres_protocol::message::backend::Message; +use postgres_protocol::message::frontend; +use std::error; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use postgres_protocol::message::frontend::CopyData; + +enum CopyInMessage { + Message(FrontendMessage), + Done, +} + +pub struct CopyInReceiver { + receiver: mpsc::Receiver, + done: bool, +} + +impl CopyInReceiver { + fn new(receiver: mpsc::Receiver) -> CopyInReceiver { + CopyInReceiver { + receiver, + done: false, + } + } +} + +impl Stream for CopyInReceiver { + type Item = FrontendMessage; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.done { + return Poll::Ready(None); + } + + match ready!(self.receiver.poll_next_unpin(cx)) { + Some(CopyInMessage::Message(message)) => Poll::Ready(Some(message)), + Some(CopyInMessage::Done) => { + self.done = true; + let mut buf = vec![]; + frontend::copy_done(&mut buf); + frontend::sync(&mut buf); + Poll::Ready(Some(FrontendMessage::Raw(buf))) + } + None => { + self.done = true; + let mut buf = vec![]; + frontend::copy_fail("", &mut buf).unwrap(); + frontend::sync(&mut buf); + Poll::Ready(Some(FrontendMessage::Raw(buf))) + } + } + } +} + +pub async fn copy_in( + client: Arc, + buf: Result, Error>, + stream: S, +) -> Result +where + S: TryStream, + S::Ok: IntoBuf, + ::Buf: 'static + Send, + S::Error: Into>, +{ + let buf = buf?; + + let (mut sender, receiver) = mpsc::channel(1); + let receiver = CopyInReceiver::new(receiver); + let mut responses = client.send(RequestMessages::CopyIn(receiver))?; + + sender + .send(CopyInMessage::Message(FrontendMessage::Raw(buf))) + .await + .map_err(|_| Error::closed())?; + + match responses.next().await? 
{ + Message::BindComplete => {} + _ => return Err(Error::unexpected_message()), + } + + match responses.next().await? { + Message::CopyInResponse(_) => {} + _ => return Err(Error::unexpected_message()), + } + + let mut bytes = BytesMut::new(); + let stream = stream.into_stream(); + pin_mut!(stream); + + while let Some(buf) = stream.try_next().await.map_err(Error::copy_in_stream)? { + let buf = buf.into_buf(); + + let data: Box = if buf.remaining() > 4096 { + if bytes.is_empty() { + Box::new(buf) + } else { + Box::new(bytes.take().freeze().into_buf().chain(buf)) + } + } else { + bytes.reserve(buf.remaining()); + bytes.put(buf); + if bytes.len() > 4096 { + Box::new(bytes.take().freeze().into_buf()) + } else { + continue; + } + }; + + let data = CopyData::new(data).map_err(Error::encode)?; + sender + .send(CopyInMessage::Message(FrontendMessage::CopyData(data))) + .await + .map_err(|_| Error::closed())?; + } + + if !bytes.is_empty() { + let data: Box = Box::new(bytes.freeze().into_buf()); + let data = CopyData::new(data).map_err(Error::encode)?; + sender + .send(CopyInMessage::Message(FrontendMessage::CopyData(data))) + .await + .map_err(|_| Error::closed())?; + } + + sender + .send(CopyInMessage::Done) + .await + .map_err(|_| Error::closed())?; + + match responses.next().await? { + Message::CommandComplete(body) => { + let rows = body + .tag() + .map_err(Error::parse)? + .rsplit(' ') + .next() + .unwrap() + .parse() + .unwrap_or(0); + Ok(rows) + } + _ => Err(Error::unexpected_message()), + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 363200485..43989ba8b 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -9,10 +9,11 @@ //! use tokio_postgres::{NoTls, Error, Row}; //! //! # #[cfg(feature = "runtime")] -//! #[tokio::main] +//! #[tokio::main] // By default, tokio_postgres uses the tokio crate as its runtime. //! async fn main() -> Result<(), Error> { //! // Connect to the database. -//! let (mut client, connection) = tokio_postgres::connect("host=localhost user=postgres", NoTls).await?; +//! let (mut client, connection) = +//! tokio_postgres::connect("host=localhost user=postgres", NoTls).await?; //! //! // The connection object performs the actual communication with the database, //! // so spawn it off to run on its own. @@ -108,7 +109,6 @@ pub use crate::client::Client; pub use crate::config::Config; -pub use crate::transaction::Transaction; pub use crate::connection::Connection; use crate::error::DbError; pub use crate::error::Error; @@ -118,6 +118,7 @@ pub use crate::socket::Socket; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; +pub use crate::transaction::Transaction; pub use statement::{Column, Statement}; #[cfg(feature = "runtime")] @@ -133,6 +134,7 @@ mod connect_raw; mod connect_socket; mod connect_tls; mod connection; +mod copy_in; pub mod error; mod maybe_tls_stream; mod prepare; @@ -142,8 +144,8 @@ mod simple_query; #[cfg(feature = "runtime")] mod socket; mod statement; -mod transaction; pub mod tls; +mod transaction; pub mod types; /// A convenience function which parses a connection string and connects to the database. 
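The new `copy_in` method takes a prepared `COPY ... FROM STDIN` statement plus a fallible stream of data chunks and drives the CopyData/CopyDone exchange itself, resolving to the server-reported row count. A minimal usage sketch mirroring the `copy_in` test added later in this patch; the temporary `foo (id INTEGER, name TEXT)` table, a reachable server, and the `u64` count type are assumptions of the sketch:

```rust
use futures::stream;
use tokio_postgres::{Client, Error};

async fn copy_rows(client: &mut Client) -> Result<u64, Error> {
    let stmt = client.prepare("COPY foo FROM STDIN").await?;

    // Each stream item is one chunk of tab-separated COPY text; the client
    // buffers small chunks and forwards them to the server as CopyData messages.
    let rows = stream::iter(
        vec![b"1\tjim\n".to_vec(), b"2\tjoe\n".to_vec()]
            .into_iter()
            .map(Ok::<_, String>),
    );

    // Resolves to the number of rows the server reports as copied.
    client.copy_in(&stmt, &[], rows).await
}
```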
diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 01853c6d5..403627452 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -7,8 +7,10 @@ use crate::types::{ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; use crate::{query, Client, Error, Row, SimpleQueryMessage, Statement}; -use futures::Stream; +use bytes::IntoBuf; +use futures::{Stream, TryStream}; use postgres_protocol::message::frontend; +use std::error; use std::future::Future; use tokio::io::{AsyncRead, AsyncWrite}; @@ -120,6 +122,22 @@ impl<'a> Transaction<'a> { query::execute(self.client.inner(), buf) } + /// Like `Client::copy_in`. + pub fn copy_in( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + stream: S, + ) -> impl Future> + where + S: TryStream, + S::Ok: IntoBuf, + ::Buf: 'static + Send, + S::Error: Into>, + { + self.client.copy_in(statement, params, stream) + } + /// Like `Client::simple_query`. pub fn simple_query( &mut self, diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 8c246bad5..8e15f63be 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -2,13 +2,15 @@ #![feature(async_await)] use futures::{join, try_join, FutureExt, TryStreamExt}; +use std::fmt::Write; +use std::time::{Duration, Instant}; +use futures::stream; use tokio::net::TcpStream; +use tokio::timer::Delay; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::types::{Kind, Type}; use tokio_postgres::{Client, Config, Connection, Error, SimpleQueryMessage}; -use tokio::timer::Delay; -use std::time::{Duration, Instant}; mod parse; #[cfg(feature = "runtime")] @@ -301,7 +303,9 @@ async fn simple_query() { async fn cancel_query_raw() { let mut client = connect("user=postgres").await; - let socket = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()).await.unwrap(); + let socket = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + .await + .unwrap(); let cancel = client.cancel_query_raw(socket, NoTls); let cancel = Delay::new(Instant::now() + Duration::from_millis(100)).then(|()| cancel); @@ -317,19 +321,29 @@ async fn cancel_query_raw() { async fn transaction_commit() { let mut client = connect("user=postgres").await; - client.batch_execute( - "CREATE TEMPORARY TABLE foo( + client + .batch_execute( + "CREATE TEMPORARY TABLE foo( id SERIAL, name TEXT )", - ).await.unwrap(); + ) + .await + .unwrap(); let mut transaction = client.transaction().await.unwrap(); - transaction.batch_execute("INSERT INTO foo (name) VALUES ('steven')").await.unwrap(); + transaction + .batch_execute("INSERT INTO foo (name) VALUES ('steven')") + .await + .unwrap(); transaction.commit().await.unwrap(); let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); - let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); + let rows = client + .query(&stmt, &[]) + .try_collect::>() + .await + .unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "steven"); @@ -339,19 +353,29 @@ async fn transaction_commit() { async fn transaction_rollback() { let mut client = connect("user=postgres").await; - client.batch_execute( - "CREATE TEMPORARY TABLE foo( + client + .batch_execute( + "CREATE TEMPORARY TABLE foo( id SERIAL, name TEXT )", - ).await.unwrap(); + ) + .await + .unwrap(); let mut transaction = client.transaction().await.unwrap(); - transaction.batch_execute("INSERT INTO foo (name) VALUES ('steven')").await.unwrap(); + 
transaction + .batch_execute("INSERT INTO foo (name) VALUES ('steven')") + .await + .unwrap(); transaction.rollback().await.unwrap(); let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); - let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); + let rows = client + .query(&stmt, &[]) + .try_collect::>() + .await + .unwrap(); assert_eq!(rows.len(), 0); } @@ -360,20 +384,105 @@ async fn transaction_rollback() { async fn transaction_rollback_drop() { let mut client = connect("user=postgres").await; - client.batch_execute( - "CREATE TEMPORARY TABLE foo( + client + .batch_execute( + "CREATE TEMPORARY TABLE foo( id SERIAL, name TEXT )", - ).await.unwrap(); + ) + .await + .unwrap(); let mut transaction = client.transaction().await.unwrap(); - transaction.batch_execute("INSERT INTO foo (name) VALUES ('steven')").await.unwrap(); + transaction + .batch_execute("INSERT INTO foo (name) VALUES ('steven')") + .await + .unwrap(); drop(transaction); let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); + let rows = client + .query(&stmt, &[]) + .try_collect::>() + .await + .unwrap(); + + assert_eq!(rows.len(), 0); +} + +#[tokio::test] +async fn copy_in() { + let mut client = connect("user=postgres").await; + + client.batch_execute( + "CREATE TEMPORARY TABLE foo ( + id INTEGER,\ + name TEXT\ + )" + ).await.unwrap(); + + let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); + let stream = stream::iter(vec![b"1\tjim\n".to_vec(), b"2\tjoe\n".to_vec()].into_iter().map(Ok::<_, String>)); + let rows = client.copy_in(&stmt, &[], stream).await.unwrap(); + assert_eq!(rows, 2); + + let stmt = client.prepare("SELECT id, name FROM foo ORDER BY id").await.unwrap(); let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[0].get::<_, &str>(1), "jim"); + assert_eq!(rows[1].get::<_, i32>(0), 2); + assert_eq!(rows[1].get::<_, &str>(1), "joe"); +} + +#[tokio::test] +async fn copy_in_large() { + let mut client = connect("user=postgres").await; + + client.batch_execute( + "CREATE TEMPORARY TABLE foo ( + id INTEGER,\ + name TEXT\ + )" + ).await.unwrap(); + + let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); + + let a = "0\tname0\n".to_string(); + let mut b = String::new(); + for i in 1..5_000 { + writeln!(b, "{0}\tname{0}", i).unwrap(); + } + let mut c = String::new(); + for i in 5_000..10_000 { + writeln!(c, "{0}\tname{0}", i).unwrap(); + } + let stream = stream::iter(vec![a, b, c].into_iter().map(Ok::<_, String>)); + + let rows = client.copy_in(&stmt, &[], stream).await.unwrap(); + assert_eq!(rows, 10_000); +} + +#[tokio::test] +async fn copy_in_error() { + let mut client = connect("user=postgres").await; + + client.batch_execute( + "CREATE TEMPORARY TABLE foo ( + id INTEGER,\ + name TEXT\ + )" + ).await.unwrap(); + + let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); + let stream = stream::iter(vec![Ok(b"1\tjim\n".to_vec()), Err("asdf")]); + let error = client.copy_in(&stmt, &[], stream).await.unwrap_err(); + assert!(error.to_string().contains("asdf")); + + let stmt = client.prepare("SELECT id, name FROM foo ORDER BY id").await.unwrap(); + let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); assert_eq!(rows.len(), 0); } @@ -466,139 +575,6 @@ fn notifications() { assert_eq!(notifications[1].payload(), "world"); } -#[test] -fn copy_in() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut 
client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "CREATE TEMPORARY TABLE foo ( - id INTEGER, - name TEXT - )", - ) - .for_each(|_| Ok(())), - ) - .unwrap(); - - let stream = stream::iter_ok::<_, String>(vec![b"1\tjim\n".to_vec(), b"2\tjoe\n".to_vec()]); - let rows = runtime - .block_on( - client - .prepare("COPY foo FROM STDIN") - .and_then(|s| client.copy_in(&s, &[], stream)), - ) - .unwrap(); - assert_eq!(rows, 2); - - let rows = runtime - .block_on( - client - .prepare("SELECT id, name FROM foo ORDER BY id") - .and_then(|s| client.query(&s, &[]).collect()), - ) - .unwrap(); - - assert_eq!(rows.len(), 2); - assert_eq!(rows[0].get::<_, i32>(0), 1); - assert_eq!(rows[0].get::<_, &str>(1), "jim"); - assert_eq!(rows[1].get::<_, i32>(0), 2); - assert_eq!(rows[1].get::<_, &str>(1), "joe"); -} - -#[test] -fn copy_in_large() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "CREATE TEMPORARY TABLE foo ( - id INTEGER, - name TEXT - )", - ) - .for_each(|_| Ok(())), - ) - .unwrap(); - - let a = "0\tname0\n".to_string(); - let mut b = String::new(); - for i in 1..5_000 { - writeln!(b, "{0}\tname{0}", i).unwrap(); - } - let mut c = String::new(); - for i in 5_000..10_000 { - writeln!(c, "{0}\tname{0}", i).unwrap(); - } - - let stream = stream::iter_ok::<_, String>(vec![a, b, c]); - let rows = runtime - .block_on( - client - .prepare("COPY foo FROM STDIN") - .and_then(|s| client.copy_in(&s, &[], stream)), - ) - .unwrap(); - assert_eq!(rows, 10_000); -} - -#[test] -fn copy_in_error() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "CREATE TEMPORARY TABLE foo ( - id INTEGER, - name TEXT - )", - ) - .for_each(|_| Ok(())), - ) - .unwrap(); - - let stream = stream::iter_result(vec![Ok(b"1\tjim\n".to_vec()), Err("asdf")]); - let error = runtime - .block_on( - client - .prepare("COPY foo FROM STDIN") - .and_then(|s| client.copy_in(&s, &[], stream)), - ) - .unwrap_err(); - assert!(error.to_string().contains("asdf")); - - let rows = runtime - .block_on( - client - .prepare("SELECT id, name FROM foo ORDER BY id") - .and_then(|s| client.query(&s, &[]).collect()), - ) - .unwrap(); - - assert_eq!(rows.len(), 0); -} - #[test] fn copy_out() { let _ = env_logger::try_init(); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index ffc3df173..f7d641a26 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,8 +1,8 @@ -use futures::{FutureExt, TryStreamExt, join}; +use futures::{join, FutureExt, TryStreamExt}; use std::time::{Duration, Instant}; use tokio::timer::Delay; use tokio_postgres::error::SqlState; -use tokio_postgres::{NoTls, Client}; +use tokio_postgres::{Client, NoTls}; async fn connect(s: &str) -> Client { let (client, connection) = tokio_postgres::connect(s, NoTls).await.unwrap(); From 
beb509f3f3d55131f09045298e2a3c9125bf6b07 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 30 Jul 2019 21:29:18 -0700 Subject: [PATCH 207/819] rustfmt --- tokio-postgres/src/copy_in.rs | 2 +- tokio-postgres/tests/test/main.rs | 93 ++++++++++++++++++++----------- 2 files changed, 61 insertions(+), 34 deletions(-) diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index dad0bfad5..c40df728a 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -9,11 +9,11 @@ use futures::{SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; use pin_utils::pin_mut; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; +use postgres_protocol::message::frontend::CopyData; use std::error; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use postgres_protocol::message::frontend::CopyData; enum CopyInMessage { Message(FrontendMessage), diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 8e15f63be..8c8961119 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,10 +1,10 @@ #![warn(rust_2018_idioms)] #![feature(async_await)] +use futures::stream; use futures::{join, try_join, FutureExt, TryStreamExt}; use std::fmt::Write; use std::time::{Duration, Instant}; -use futures::stream; use tokio::net::TcpStream; use tokio::timer::Delay; use tokio_postgres::error::SqlState; @@ -324,9 +324,9 @@ async fn transaction_commit() { client .batch_execute( "CREATE TEMPORARY TABLE foo( - id SERIAL, - name TEXT - )", + id SERIAL, + name TEXT + )", ) .await .unwrap(); @@ -356,9 +356,9 @@ async fn transaction_rollback() { client .batch_execute( "CREATE TEMPORARY TABLE foo( - id SERIAL, - name TEXT - )", + id SERIAL, + name TEXT + )", ) .await .unwrap(); @@ -387,9 +387,9 @@ async fn transaction_rollback_drop() { client .batch_execute( "CREATE TEMPORARY TABLE foo( - id SERIAL, - name TEXT - )", + id SERIAL, + name TEXT + )", ) .await .unwrap(); @@ -415,20 +415,34 @@ async fn transaction_rollback_drop() { async fn copy_in() { let mut client = connect("user=postgres").await; - client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id INTEGER,\ - name TEXT\ - )" - ).await.unwrap(); + client + .batch_execute( + "CREATE TEMPORARY TABLE foo ( + id INTEGER, + name TEXT + )", + ) + .await + .unwrap(); let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); - let stream = stream::iter(vec![b"1\tjim\n".to_vec(), b"2\tjoe\n".to_vec()].into_iter().map(Ok::<_, String>)); + let stream = stream::iter( + vec![b"1\tjim\n".to_vec(), b"2\tjoe\n".to_vec()] + .into_iter() + .map(Ok::<_, String>), + ); let rows = client.copy_in(&stmt, &[], stream).await.unwrap(); assert_eq!(rows, 2); - let stmt = client.prepare("SELECT id, name FROM foo ORDER BY id").await.unwrap(); - let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); + let stmt = client + .prepare("SELECT id, name FROM foo ORDER BY id") + .await + .unwrap(); + let rows = client + .query(&stmt, &[]) + .try_collect::>() + .await + .unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -441,12 +455,15 @@ async fn copy_in() { async fn copy_in_large() { let mut client = connect("user=postgres").await; - client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id INTEGER,\ - name TEXT\ - )" - ).await.unwrap(); + client + .batch_execute( + "CREATE TEMPORARY TABLE foo ( + id INTEGER, + name TEXT + )", + ) + .await + .unwrap(); let stmt = client.prepare("COPY foo FROM 
STDIN").await.unwrap(); @@ -469,20 +486,30 @@ async fn copy_in_large() { async fn copy_in_error() { let mut client = connect("user=postgres").await; - client.batch_execute( - "CREATE TEMPORARY TABLE foo ( - id INTEGER,\ - name TEXT\ - )" - ).await.unwrap(); + client + .batch_execute( + "CREATE TEMPORARY TABLE foo ( + id INTEGER, + name TEXT + )", + ) + .await + .unwrap(); let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); let stream = stream::iter(vec![Ok(b"1\tjim\n".to_vec()), Err("asdf")]); let error = client.copy_in(&stmt, &[], stream).await.unwrap_err(); assert!(error.to_string().contains("asdf")); - let stmt = client.prepare("SELECT id, name FROM foo ORDER BY id").await.unwrap(); - let rows = client.query(&stmt, &[]).try_collect::>().await.unwrap(); + let stmt = client + .prepare("SELECT id, name FROM foo ORDER BY id") + .await + .unwrap(); + let rows = client + .query(&stmt, &[]) + .try_collect::>() + .await + .unwrap(); assert_eq!(rows.len(), 0); } From 81bc845bcf444139e8eb6868f661fe2b7d1a8b7b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 31 Jul 2019 19:44:16 -0700 Subject: [PATCH 208/819] Simplify logic a bit --- tokio-postgres/src/connection.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index b7b5fa170..6a9ae27a6 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -198,6 +198,14 @@ where return Ok(false); } + if let Poll::Pending = Pin::new(&mut self.stream) + .poll_ready(cx) + .map_err(Error::io)? + { + trace!("poll_write: waiting on socket"); + return Ok(false); + } + let request = match self.poll_request(cx) { Poll::Ready(Some(request)) => request, Poll::Ready(None) if self.responses.is_empty() && self.state == State::Active => { @@ -220,15 +228,6 @@ where } }; - if let Poll::Pending = Pin::new(&mut self.stream) - .poll_ready(cx) - .map_err(Error::io)? 
- { - trace!("poll_write: waiting on socket"); - self.pending_request = Some(request); - return Ok(false); - } - match request { RequestMessages::Single(request) => { Pin::new(&mut self.stream) From 9c178ad8160d5e291153c527fbf3af452d2e6529 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 31 Jul 2019 20:15:17 -0700 Subject: [PATCH 209/819] Support copy_out --- tokio-postgres/src/cancel_query.rs | 2 +- tokio-postgres/src/client.rs | 18 ++++++++-- tokio-postgres/src/connect.rs | 2 -- tokio-postgres/src/connect_raw.rs | 2 +- tokio-postgres/src/connect_socket.rs | 2 +- tokio-postgres/src/copy_out.rs | 52 ++++++++++++++++++++++++++++ tokio-postgres/src/lib.rs | 1 + tokio-postgres/tests/test/main.rs | 51 ++++++++++----------------- 8 files changed, 90 insertions(+), 40 deletions(-) create mode 100644 tokio-postgres/src/copy_out.rs diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index ebfea91a1..d7bb50474 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -1,7 +1,7 @@ use crate::client::SocketConfig; use crate::config::{Host, SslMode}; use crate::tls::MakeTlsConnect; -use crate::{cancel_query_raw, connect_socket, connect_tls, Error, Socket}; +use crate::{cancel_query_raw, connect_socket, Error, Socket}; use std::io; pub(crate) async fn cancel_query( diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 6971228fa..12eb60c74 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -7,11 +7,11 @@ use crate::tls::TlsConnect; use crate::types::{Oid, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; -use crate::{cancel_query, cancel_query_raw, copy_in, query, Transaction}; +use crate::{cancel_query, cancel_query_raw, copy_in, copy_out, query, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; -use bytes::IntoBuf; +use bytes::{Bytes, IntoBuf}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::{future, Stream, TryStream}; @@ -266,6 +266,20 @@ impl Client { copy_in::copy_in(self.inner(), buf, stream) } + /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. + pub fn copy_out( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + ) -> impl Stream> { + let buf = query::encode(statement, params.iter().cloned()); + copy_out::copy_out(self.inner(), buf) + } + /// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows. /// /// Statements should be separated by semicolons. 
If an error occurs, execution of the sequence will stop at that
diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs
index 86e4532d5..5197200ef 100644
--- a/tokio-postgres/src/connect.rs
+++ b/tokio-postgres/src/connect.rs
@@ -7,9 +7,7 @@ use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket};
 use futures::future;
 use futures::{FutureExt, Stream};
 use pin_utils::pin_mut;
-use std::future::Future;
 use std::io;
-use std::pin::Pin;
 use std::task::Poll;
 
 pub async fn connect<T>(
diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs
index a8f404dde..218d192cf 100644
--- a/tokio-postgres/src/connect_raw.rs
+++ b/tokio-postgres/src/connect_raw.rs
@@ -1,5 +1,5 @@
 use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec};
-use crate::config::{Config, SslMode};
+use crate::config::{Config};
 use crate::connect_tls::connect_tls;
 use crate::maybe_tls_stream::MaybeTlsStream;
 use crate::tls::{ChannelBinding, TlsConnect};
diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs
index 01643ae63..3209b139e 100644
--- a/tokio-postgres/src/connect_socket.rs
+++ b/tokio-postgres/src/connect_socket.rs
@@ -1,5 +1,5 @@
 use crate::config::Host;
-use crate::{Config, Error, Socket};
+use crate::{Error, Socket};
 use std::future::Future;
 use std::io;
 use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs
new file mode 100644
index 000000000..5c9157b28
--- /dev/null
+++ b/tokio-postgres/src/copy_out.rs
@@ -0,0 +1,52 @@
+use crate::client::{InnerClient, Responses};
+use crate::Error;
+use bytes::Bytes;
+use futures::{Stream, TryFutureExt, ready};
+use std::sync::Arc;
+use crate::codec::FrontendMessage;
+use postgres_protocol::message::backend::Message;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use crate::connection::RequestMessages;
+
+pub fn copy_out(
+    client: Arc<InnerClient>,
+    buf: Result<Vec<u8>, Error>,
+) -> impl Stream<Item = Result<Bytes, Error>> {
+    start(client, buf)
+        .map_ok(|responses| CopyOut { responses })
+        .try_flatten_stream()
+}
+
+async fn start(client: Arc<InnerClient>, buf: Result<Vec<u8>, Error>) -> Result<Responses, Error> {
+    let buf = buf?;
+    let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;
+
+    match responses.next().await? {
+        Message::BindComplete => {}
+        _ => return Err(Error::unexpected_message()),
+    }
+
+    match responses.next().await? {
+        Message::CopyOutResponse(_) => {}
+        _ => return Err(Error::unexpected_message()),
+    }
+
+    Ok(responses)
+}
+
+struct CopyOut {
+    responses: Responses,
+}
+
+impl Stream for CopyOut {
+    type Item = Result<Bytes, Error>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        match ready!(self.responses.poll_next(cx)?)
{ + Message::CopyData(body) => Poll::Ready(Some(Ok(body.into_bytes()))), + Message::CopyDone => Poll::Ready(None), + _ => Poll::Ready(Some(Err(Error::unexpected_message()))), + } + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 43989ba8b..59d83932d 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -126,6 +126,7 @@ mod cancel_query; mod cancel_query_raw; mod client; mod codec; +mod copy_out; pub mod config; #[cfg(feature = "runtime")] mod connect; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 8c8961119..4512fed70 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -513,6 +513,24 @@ async fn copy_in_error() { assert_eq!(rows.len(), 0); } +#[tokio::test] +async fn copy_out() { + let mut client = connect("user=postgres").await; + + client.batch_execute( + "CREATE TEMPORARY TABLE foo ( + id SERIAL, + name TEXT + ); + + INSERT INTO foo (name) VALUES ('jim'), ('joe');" + ).await.unwrap(); + + let stmt = client.prepare("COPY foo TO STDOUT").await.unwrap(); + let data = client.copy_out(&stmt, &[]).try_concat().await.unwrap(); + assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); +} + /* #[test] fn query_portal() { @@ -602,39 +620,6 @@ fn notifications() { assert_eq!(notifications[1].payload(), "world"); } -#[test] -fn copy_out() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "CREATE TEMPORARY TABLE foo ( - id SERIAL, - name TEXT - ); - INSERT INTO foo (name) VALUES ('jim'), ('joe');", - ) - .for_each(|_| Ok(())), - ) - .unwrap(); - - let data = runtime - .block_on( - client - .prepare("COPY foo TO STDOUT") - .and_then(|s| client.copy_out(&s, &[]).concat2()), - ) - .unwrap(); - assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); -} - #[test] fn poll_idle_running() { struct DelayStream(Delay); From e521e3b0a5bc23a079e1c060371350ff6a059857 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 31 Jul 2019 20:16:07 -0700 Subject: [PATCH 210/819] Transaction::copy_out --- tokio-postgres/src/transaction.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 403627452..e7f614886 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -7,7 +7,7 @@ use crate::types::{ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; use crate::{query, Client, Error, Row, SimpleQueryMessage, Statement}; -use bytes::IntoBuf; +use bytes::{Bytes, IntoBuf}; use futures::{Stream, TryStream}; use postgres_protocol::message::frontend; use std::error; @@ -138,6 +138,15 @@ impl<'a> Transaction<'a> { self.client.copy_in(statement, params, stream) } + /// Like `Client::copy_out`. + pub fn copy_out( + &mut self, + statement: &Statement, + params: &[&dyn ToSql], + ) -> impl Stream> { + self.client.copy_out(statement, params) + } + /// Like `Client::simple_query`. 
pub fn simple_query( &mut self, From 5dccb9988a4008aaea700785aea931aeaefa3a46 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 31 Jul 2019 21:19:56 -0700 Subject: [PATCH 211/819] Test notifications --- tokio-postgres/src/connect_raw.rs | 2 +- tokio-postgres/src/connection.rs | 2 +- tokio-postgres/src/copy_out.rs | 8 +-- tokio-postgres/src/lib.rs | 2 +- tokio-postgres/tests/test/main.rs | 94 +++++++++++++++---------------- 5 files changed, 51 insertions(+), 57 deletions(-) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 218d192cf..7b9fbd5e6 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -1,5 +1,5 @@ use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; -use crate::config::{Config}; +use crate::config::Config; use crate::connect_tls::connect_tls; use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::{ChannelBinding, TlsConnect}; diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index 6a9ae27a6..4c8929c4d 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -292,7 +292,7 @@ where } pub fn poll_message( - mut self: Pin<&mut Self>, + &mut self, cx: &mut Context<'_>, ) -> Poll>> { let message = self.poll_read(cx)?; diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 5c9157b28..670c08f29 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -1,13 +1,13 @@ use crate::client::{InnerClient, Responses}; +use crate::codec::FrontendMessage; +use crate::connection::RequestMessages; use crate::Error; use bytes::Bytes; -use futures::{Stream, TryFutureExt, ready}; -use std::sync::Arc; -use crate::codec::FrontendMessage; +use futures::{ready, Stream, TryFutureExt}; use postgres_protocol::message::backend::Message; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; -use crate::connection::RequestMessages; pub fn copy_out( client: Arc, diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 59d83932d..520af8b03 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -126,7 +126,6 @@ mod cancel_query; mod cancel_query_raw; mod client; mod codec; -mod copy_out; pub mod config; #[cfg(feature = "runtime")] mod connect; @@ -136,6 +135,7 @@ mod connect_socket; mod connect_tls; mod connection; mod copy_in; +mod copy_out; pub mod error; mod maybe_tls_stream; mod prepare; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 4512fed70..c41cf7ab7 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,7 +1,8 @@ #![warn(rust_2018_idioms)] #![feature(async_await)] -use futures::stream; +use futures::channel::mpsc; +use futures::{future, stream, StreamExt}; use futures::{join, try_join, FutureExt, TryStreamExt}; use std::fmt::Write; use std::time::{Duration, Instant}; @@ -10,7 +11,7 @@ use tokio::timer::Delay; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::types::{Kind, Type}; -use tokio_postgres::{Client, Config, Connection, Error, SimpleQueryMessage}; +use tokio_postgres::{AsyncMessage, Client, Config, Connection, Error, SimpleQueryMessage}; mod parse; #[cfg(feature = "runtime")] @@ -517,20 +518,57 @@ async fn copy_in_error() { async fn copy_out() { let mut client = connect("user=postgres").await; - client.batch_execute( - "CREATE TEMPORARY TABLE foo ( + client + .batch_execute( + 
"CREATE TEMPORARY TABLE foo ( id SERIAL, name TEXT ); - INSERT INTO foo (name) VALUES ('jim'), ('joe');" - ).await.unwrap(); + INSERT INTO foo (name) VALUES ('jim'), ('joe');", + ) + .await + .unwrap(); let stmt = client.prepare("COPY foo TO STDOUT").await.unwrap(); let data = client.copy_out(&stmt, &[]).try_concat().await.unwrap(); assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); } +#[tokio::test] +async fn notifications() { + let (mut client, mut connection) = connect_raw("user=postgres").await.unwrap(); + + let (tx, rx) = mpsc::unbounded(); + let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!(e)); + let connection = stream.forward(tx).map(|r| r.unwrap()); + tokio::spawn(connection); + + client + .batch_execute( + "LISTEN test_notifications; + NOTIFY test_notifications, 'hello'; + NOTIFY test_notifications, 'world';", + ) + .await + .unwrap(); + + drop(client); + + let notifications = rx + .filter_map(|m| match m { + AsyncMessage::Notification(n) => future::ready(Some(n)), + _ => future::ready(None), + }) + .collect::>() + .await; + assert_eq!(notifications.len(), 2); + assert_eq!(notifications[0].channel(), "test_notifications"); + assert_eq!(notifications[0].payload(), "hello"); + assert_eq!(notifications[1].channel(), "test_notifications"); + assert_eq!(notifications[1].payload(), "world"); +} + /* #[test] fn query_portal() { @@ -576,50 +614,6 @@ fn query_portal() { assert_eq!(r3.len(), 0); } -#[test] -fn notifications() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, mut connection) = runtime.block_on(connect("user=postgres")).unwrap(); - - let (tx, rx) = mpsc::unbounded(); - let connection = future::poll_fn(move || { - while let Some(message) = try_ready!(connection.poll_message().map_err(|e| panic!("{}", e))) - { - if let AsyncMessage::Notification(notification) = message { - debug!("received {}", notification.payload()); - tx.unbounded_send(notification).unwrap(); - } - } - - Ok(Async::Ready(())) - }); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "LISTEN test_notifications; - NOTIFY test_notifications, 'hello'; - NOTIFY test_notifications, 'world';", - ) - .for_each(|_| Ok(())), - ) - .unwrap(); - - drop(client); - runtime.run().unwrap(); - - let notifications = rx.collect().wait().unwrap(); - assert_eq!(notifications.len(), 2); - assert_eq!(notifications[0].channel(), "test_notifications"); - assert_eq!(notifications[0].payload(), "hello"); - assert_eq!(notifications[1].channel(), "test_notifications"); - assert_eq!(notifications[1].payload(), "world"); -} - #[test] fn poll_idle_running() { struct DelayStream(Delay); From 785205ffb22f88fd10864f448103097d04cbc72d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 1 Aug 2019 18:40:14 -0700 Subject: [PATCH 212/819] Finish documenting everything --- tokio-postgres/src/client.rs | 4 ++++ tokio-postgres/src/connection.rs | 16 ++++++++++------ tokio-postgres/src/statement.rs | 4 ++++ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 12eb60c74..8808eb380 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -120,6 +120,10 @@ pub(crate) struct SocketConfig { pub keepalives_idle: Duration, } +/// An asynchronous PostgreSQL client. +/// +/// The client is one half of what is returned when a connection is established. Users interact with the database +/// through this client object. 
pub struct Client { inner: Arc, socket_config: Option, diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index 4c8929c4d..c8d55a340 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -76,11 +76,6 @@ where } } - /// Returns the value of a runtime parameter for this connection. - pub fn parameter(&self, name: &str) -> Option<&str> { - self.parameters.get(name).map(|s| &**s) - } - fn poll_response( &mut self, cx: &mut Context<'_>, @@ -291,6 +286,15 @@ where } } + /// Returns the value of a runtime parameter for this connection. + pub fn parameter(&self, name: &str) -> Option<&str> { + self.parameters.get(name).map(|s| &**s) + } + + /// Polls for asynchronous messages from the server. + /// + /// The server can send notices as well as notifications asynchronously to the client. Applications that wish to + /// examine those messages should use this method to drive the connection rather than its `Future` implementation. pub fn poll_message( &mut self, cx: &mut Context<'_>, @@ -319,7 +323,7 @@ where type Output = Result<(), Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - while let Some(_) = ready!(Pin::as_mut(&mut self).poll_message(cx)?) {} + while let Some(_) = ready!(self.poll_message(cx)?) {} Poll::Ready(Ok(())) } } diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index 09a7274f2..52028bb20 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -23,6 +23,9 @@ impl Drop for StatementInner { } } +/// A prepared statement. +/// +/// Prepared statements can only be used with the connection that created them. #[derive(Clone)] pub struct Statement(Arc); @@ -56,6 +59,7 @@ impl Statement { } } +/// Information about a column of a query. #[derive(Debug)] pub struct Column { name: String, From 760039627a1b94cdfaca3e5efad435d0440a1ee1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 1 Aug 2019 18:43:38 -0700 Subject: [PATCH 213/819] Fix build without the runtime feature --- tokio-postgres/src/client.rs | 7 ++++++- tokio-postgres/src/lib.rs | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 8808eb380..03f0d0314 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "runtime")] +use crate::cancel_query; use crate::codec::BackendMessages; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; @@ -7,7 +9,7 @@ use crate::tls::TlsConnect; use crate::types::{Oid, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; -use crate::{cancel_query, cancel_query_raw, copy_in, copy_out, query, Transaction}; +use crate::{cancel_query_raw, copy_in, copy_out, query, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; @@ -126,6 +128,7 @@ pub(crate) struct SocketConfig { /// through this client object. 
pub struct Client { inner: Arc, + #[cfg(feature = "runtime")] socket_config: Option, ssl_mode: SslMode, process_id: i32, @@ -149,6 +152,7 @@ impl Client { types: HashMap::new(), }), }), + #[cfg(feature = "runtime")] socket_config: None, ssl_mode, process_id, @@ -160,6 +164,7 @@ impl Client { self.inner.clone() } + #[cfg(feature = "runtime")] pub(crate) fn set_socket_config(&mut self, socket_config: SocketConfig) { self.socket_config = Some(socket_config); } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 520af8b03..199dfdb48 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -8,6 +8,7 @@ //! use futures::{FutureExt, TryStreamExt}; //! use tokio_postgres::{NoTls, Error, Row}; //! +//! # #[cfg(not(feature = "runtime"))] fn main() {} //! # #[cfg(feature = "runtime")] //! #[tokio::main] // By default, tokio_postgres uses the tokio crate as its runtime. //! async fn main() -> Result<(), Error> { From e4a1ec23a158647eaac911d02b6ad2aa9e3be4ad Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 1 Aug 2019 18:44:38 -0700 Subject: [PATCH 214/819] Fix impl ordering --- tokio-postgres/src/lib.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 199dfdb48..6734a0dda 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -177,6 +177,23 @@ pub struct Notification { payload: String, } +impl Notification { + /// The process ID of the notifying backend process. + pub fn process_id(&self) -> i32 { + self.process_id + } + + /// The name of the channel that the notify has been raised on. + pub fn channel(&self) -> &str { + &self.channel + } + + /// The "payload" string passed from the notifying process. + pub fn payload(&self) -> &str { + &self.payload + } +} + /// An asynchronous message from the server. #[allow(clippy::large_enum_variant)] pub enum AsyncMessage { @@ -203,20 +220,3 @@ pub enum SimpleQueryMessage { #[doc(hidden)] __NonExhaustive, } - -impl Notification { - /// The process ID of the notifying backend process. - pub fn process_id(&self) -> i32 { - self.process_id - } - - /// The name of the channel that the notify has been raised on. - pub fn channel(&self) -> &str { - &self.channel - } - - /// The "payload" string passed from the notifying process. 
- pub fn payload(&self) -> &str { - &self.payload - } -} From 26a17ac4ed88a2fcb607a1990bee7f8074ffa6f1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 1 Aug 2019 20:43:13 -0700 Subject: [PATCH 215/819] Support portals --- tokio-postgres/src/bind.rs | 45 +++++++++++++++++++++++++++++ tokio-postgres/src/lib.rs | 3 ++ tokio-postgres/src/portal.rs | 48 +++++++++++++++++++++++++++++++ tokio-postgres/src/query.rs | 48 +++++++++++++++++++++++++------ tokio-postgres/src/transaction.rs | 48 ++++++++++++++++++++++++++++++- tokio-postgres/tests/test/main.rs | 43 +++++++++++++++++++++++++++ 6 files changed, 225 insertions(+), 10 deletions(-) create mode 100644 tokio-postgres/src/bind.rs create mode 100644 tokio-postgres/src/portal.rs diff --git a/tokio-postgres/src/bind.rs b/tokio-postgres/src/bind.rs new file mode 100644 index 000000000..997190a9d --- /dev/null +++ b/tokio-postgres/src/bind.rs @@ -0,0 +1,45 @@ +use crate::client::InnerClient; +use crate::codec::FrontendMessage; +use crate::connection::RequestMessages; +use crate::types::ToSql; +use crate::{query, Error, Portal, Statement}; +use postgres_protocol::message::backend::Message; +use postgres_protocol::message::frontend; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; + +static NEXT_ID: AtomicUsize = AtomicUsize::new(0); + +pub async fn bind( + client: Arc, + statement: Statement, + bind: Result, +) -> Result { + let bind = bind?; + + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(bind.buf)))?; + + match responses.next().await? { + Message::BindComplete => {} + _ => return Err(Error::unexpected_message()), + } + + Ok(Portal::new(&client, bind.name, statement)) +} + +pub struct PendingBind { + buf: Vec, + name: String, +} + +pub fn encode<'a, I>(statement: &Statement, params: I) -> Result +where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + let name = format!("p{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); + let mut buf = query::encode_bind(statement, params, &name)?; + frontend::sync(&mut buf); + + Ok(PendingBind { buf, name }) +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 6734a0dda..0c1599026 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -113,6 +113,7 @@ pub use crate::config::Config; pub use crate::connection::Connection; use crate::error::DbError; pub use crate::error::Error; +pub use crate::portal::Portal; pub use crate::row::{Row, SimpleQueryRow}; #[cfg(feature = "runtime")] pub use crate::socket::Socket; @@ -122,6 +123,7 @@ pub use crate::tls::NoTls; pub use crate::transaction::Transaction; pub use statement::{Column, Statement}; +mod bind; #[cfg(feature = "runtime")] mod cancel_query; mod cancel_query_raw; @@ -139,6 +141,7 @@ mod copy_in; mod copy_out; pub mod error; mod maybe_tls_stream; +mod portal; mod prepare; mod query; pub mod row; diff --git a/tokio-postgres/src/portal.rs b/tokio-postgres/src/portal.rs new file mode 100644 index 000000000..f2cb2ac3e --- /dev/null +++ b/tokio-postgres/src/portal.rs @@ -0,0 +1,48 @@ +use crate::client::InnerClient; +use crate::codec::FrontendMessage; +use crate::connection::RequestMessages; +use crate::Statement; +use postgres_protocol::message::frontend; +use std::sync::{Arc, Weak}; + +struct Inner { + client: Weak, + name: String, + statement: Statement, +} + +impl Drop for Inner { + fn drop(&mut self) { + if let Some(client) = self.client.upgrade() { + let mut buf = vec![]; + frontend::close(b'P', &self.name, &mut buf).expect("portal name not valid"); + 
frontend::sync(&mut buf);
+            let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)));
+        }
+    }
+}
+
+/// A portal.
+///
+/// Portals can only be used with the connection that created them, and only exist for the duration of the transaction
+/// in which they were created.
+#[derive(Clone)]
+pub struct Portal(Arc<Inner>);
+
+impl Portal {
+    pub(crate) fn new(client: &Arc<InnerClient>, name: String, statement: Statement) -> Portal {
+        Portal(Arc::new(Inner {
+            client: Arc::downgrade(client),
+            name,
+            statement,
+        }))
+    }
+
+    pub(crate) fn name(&self) -> &str {
+        &self.0.name
+    }
+
+    pub(crate) fn statement(&self) -> &Statement {
+        &self.0.statement
+    }
+}
diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs
index 1f23c5eba..234281872 100644
--- a/tokio-postgres/src/query.rs
+++ b/tokio-postgres/src/query.rs
@@ -2,7 +2,7 @@ use crate::client::{InnerClient, Responses};
 use crate::codec::FrontendMessage;
 use crate::connection::RequestMessages;
 use crate::types::{IsNull, ToSql};
-use crate::{Error, Row, Statement};
+use crate::{Error, Portal, Row, Statement};
 use futures::{ready, Stream, TryFutureExt};
 use postgres_protocol::message::backend::Message;
 use postgres_protocol::message::frontend;
@@ -23,6 +23,27 @@ pub fn query(
         .try_flatten_stream()
 }
 
+pub fn query_portal(
+    client: Arc<InnerClient>,
+    portal: Portal,
+    max_rows: i32,
+) -> impl Stream<Item = Result<Row, Error>> {
+    let start = async move {
+        let mut buf = vec![];
+        frontend::execute(portal.name(), max_rows, &mut buf).map_err(Error::encode)?;
+        frontend::sync(&mut buf);
+
+        let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?;
+
+        Ok(Query {
+            statement: portal.statement().clone(),
+            responses,
+        })
+    };
+
+    start.try_flatten_stream()
+}
+
 pub async fn execute(client: Arc<InnerClient>, buf: Result<Vec<u8>, Error>) -> Result<u64, Error> {
     let mut responses = start(client, buf).await?;
 
@@ -59,6 +80,18 @@ async fn start(client: Arc<InnerClient>, buf: Result<Vec<u8>, Error>) -> Result<
 }
 
 pub fn encode<'a, I>(statement: &Statement, params: I) -> Result<Vec<u8>, Error>
+where
+    I: IntoIterator<Item = &'a dyn ToSql>,
+    I::IntoIter: ExactSizeIterator,
+{
+    let mut buf = encode_bind(statement, params, "")?;
+    frontend::execute("", 0, &mut buf).map_err(Error::encode)?;
+    frontend::sync(&mut buf);
+
+    Ok(buf)
+}
+
+pub fn encode_bind<'a, I>(statement: &Statement, params: I, portal: &str) -> Result<Vec<u8>, Error>
 where
     I: IntoIterator<Item = &'a dyn ToSql>,
     I::IntoIter: ExactSizeIterator,
@@ -76,7 +109,7 @@ where
     let mut error_idx = 0;
     let r = frontend::bind(
-        "",
+        portal,
         statement.name(),
         Some(1),
         params.zip(statement.params()).enumerate(),
@@ -92,15 +125,10 @@ where
         &mut buf,
     );
     match r {
-        Ok(()) => {}
+        Ok(()) => Ok(buf),
         Err(frontend::BindError::Conversion(e)) => return Err(Error::to_sql(e, error_idx)),
         Err(frontend::BindError::Serialization(e)) => return Err(Error::encode(e)),
     }
-
-    frontend::execute("", 0, &mut buf).map_err(Error::encode)?;
-    frontend::sync(&mut buf);
-
-    Ok(buf)
 }
 
 struct Query {
@@ -116,7 +144,9 @@ impl Stream for Query {
             Message::DataRow(body) => {
                 Poll::Ready(Some(Ok(Row::new(self.statement.clone(), body)?)))
             }
-            Message::EmptyQueryResponse | Message::CommandComplete(_) => Poll::Ready(None),
+            Message::EmptyQueryResponse
+            | Message::CommandComplete(_)
+            | Message::PortalSuspended => Poll::Ready(None),
             Message::ErrorResponse(body) => Poll::Ready(Some(Err(Error::db(body)))),
             _ => Poll::Ready(Some(Err(Error::unexpected_message()))),
         }
diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs
index e7f614886..c1271d217 100644
--- a/tokio-postgres/src/transaction.rs
+++ 
b/tokio-postgres/src/transaction.rs
@@ -6,7 +6,7 @@ use crate::tls::TlsConnect;
 use crate::types::{ToSql, Type};
 #[cfg(feature = "runtime")]
 use crate::Socket;
-use crate::{query, Client, Error, Row, SimpleQueryMessage, Statement};
+use crate::{bind, query, Client, Error, Portal, Row, SimpleQueryMessage, Statement};
 use bytes::{Bytes, IntoBuf};
 use futures::{Stream, TryStream};
 use postgres_protocol::message::frontend;
 use std::error;
@@ -122,6 +122,52 @@ impl<'a> Transaction<'a> {
         query::execute(self.client.inner(), buf)
     }
 
+    /// Binds a statement to a set of parameters, creating a `Portal` which can be incrementally queried.
+    ///
+    /// Portals only last for the duration of the transaction in which they are created, and can only be used on the
+    /// connection that created them.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the number of parameters provided does not match the number expected.
+    pub fn bind(
+        &mut self,
+        statement: &Statement,
+        params: &[&dyn ToSql],
+    ) -> impl Future<Output = Result<Portal, Error>> {
+        // https://github.com/rust-lang/rust/issues/63032
+        let buf = bind::encode(statement, params.iter().cloned());
+        bind::bind(self.client.inner(), statement.clone(), buf)
+    }
+
+    /// Like [`bind`], but takes an iterator of parameters rather than a slice.
+    ///
+    /// [`bind`]: #method.bind
+    pub fn bind_iter<'b, I>(
+        &mut self,
+        statement: &Statement,
+        params: I,
+    ) -> impl Future<Output = Result<Portal, Error>>
+    where
+        I: IntoIterator<Item = &'b dyn ToSql>,
+        I::IntoIter: ExactSizeIterator,
+    {
+        let buf = bind::encode(statement, params);
+        bind::bind(self.client.inner(), statement.clone(), buf)
+    }
+
+    /// Continues execution of a portal, returning a stream of the resulting rows.
+    ///
+    /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to
+    /// `query_portal`. If the requested number is negative or 0, all rows will be returned.
+    pub fn query_portal(
+        &mut self,
+        portal: &Portal,
+        max_rows: i32,
+    ) -> impl Stream<Item = Result<Row, Error>> {
+        query::query_portal(self.client.inner(), portal.clone(), max_rows)
+    }
+
     /// Like `Client::copy_in`.
pub fn copy_in( &mut self, diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index c41cf7ab7..3929c7b88 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -569,6 +569,49 @@ async fn notifications() { assert_eq!(notifications[1].payload(), "world"); } +#[tokio::test] +async fn query_portal() { + let mut client = connect("user=postgres").await; + + client + .batch_execute( + "CREATE TEMPORARY TABLE foo ( + id SERIAL, + name TEXT + ); + + INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('charlie');", + ) + .await + .unwrap(); + + let stmt = client + .prepare("SELECT id, name FROM foo ORDER BY id") + .await + .unwrap(); + + let mut transaction = client.transaction().await.unwrap(); + + let portal = transaction.bind(&stmt, &[]).await.unwrap(); + let f1 = transaction.query_portal(&portal, 2).try_collect::>(); + let f2 = transaction.query_portal(&portal, 2).try_collect::>(); + let f3 = transaction.query_portal(&portal, 2).try_collect::>(); + + let (r1, r2, r3) = try_join!(f1, f2, f3).unwrap(); + + assert_eq!(r1.len(), 2); + assert_eq!(r1[0].get::<_, i32>(0), 1); + assert_eq!(r1[0].get::<_, &str>(1), "alice"); + assert_eq!(r1[1].get::<_, i32>(0), 2); + assert_eq!(r1[1].get::<_, &str>(1), "bob"); + + assert_eq!(r2.len(), 1); + assert_eq!(r2[0].get::<_, i32>(0), 3); + assert_eq!(r2[0].get::<_, &str>(1), "charlie"); + + assert_eq!(r3.len(), 0); +} + /* #[test] fn query_portal() { From 048e91a57de6f28aefbde44a044f1c17e7f413f0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 2 Aug 2019 20:30:36 -0700 Subject: [PATCH 216/819] Use flush combinator --- tokio-postgres/src/cancel_query_raw.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index ecf85600b..08448ad62 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -23,9 +23,7 @@ where frontend::cancel_request(process_id, secret_key, &mut buf); stream.write_all(&buf).await.map_err(Error::io)?; - future::poll_fn(|cx| Pin::new(&mut stream).poll_flush(cx)) - .await - .map_err(Error::io)?; + stream.flush().await.map_err(Error::io)?; future::poll_fn(|cx| Pin::new(&mut stream).poll_shutdown(cx)) .await .map_err(Error::io)?; From 2a97c20bf26a0ea4169150b0e118731bb8336305 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 2 Aug 2019 20:31:15 -0700 Subject: [PATCH 217/819] Clean up old test --- tokio-postgres/tests/test/main.rs | 44 ------------------------------- 1 file changed, 44 deletions(-) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 3929c7b88..234e57a26 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -613,50 +613,6 @@ async fn query_portal() { } /* -#[test] -fn query_portal() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - runtime - .block_on( - client - .simple_query( - "CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT); - INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('charlie'); - BEGIN;", - ) - .for_each(|_| Ok(())), - ) - .unwrap(); - - let statement = runtime - .block_on(client.prepare("SELECT id, name FROM foo ORDER BY id")) - .unwrap(); - let portal = runtime.block_on(client.bind(&statement, 
&[])).unwrap(); - - let f1 = client.query_portal(&portal, 2).collect(); - let f2 = client.query_portal(&portal, 2).collect(); - let f3 = client.query_portal(&portal, 2).collect(); - let (r1, r2, r3) = runtime.block_on(f1.join3(f2, f3)).unwrap(); - - assert_eq!(r1.len(), 2); - assert_eq!(r1[0].get::<_, i32>(0), 1); - assert_eq!(r1[0].get::<_, &str>(1), "alice"); - assert_eq!(r1[1].get::<_, i32>(0), 2); - assert_eq!(r1[1].get::<_, &str>(1), "bob"); - - assert_eq!(r2.len(), 1); - assert_eq!(r2[0].get::<_, i32>(0), 3); - assert_eq!(r2[0].get::<_, &str>(1), "charlie"); - - assert_eq!(r3.len(), 0); -} - #[test] fn poll_idle_running() { struct DelayStream(Delay); From ad31290f5fb7ebe0ab908a48fccd4ab21a587d5f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 2 Aug 2019 20:49:22 -0700 Subject: [PATCH 218/819] Update postgres-openssl --- postgres-openssl/Cargo.toml | 10 ++--- postgres-openssl/src/lib.rs | 45 ++++++++----------- postgres-openssl/src/test.rs | 87 +++++++++++++++++++----------------- 3 files changed, 70 insertions(+), 72 deletions(-) diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index a45f8acbc..4c743152f 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures = "0.1" +futures-preview = "0.3.0-alpha.17" openssl = "0.10" -tokio-io = "0.1" -tokio-openssl = "0.3" +tokio-io = { git = "https://github.com/tokio-rs/tokio" } +tokio-openssl = { git = "https://github.com/sfackler/tokio-openssl", branch = "tokio-02" } tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "0.1.7" -postgres = { version = "0.16.0-rc.1", path = "../postgres" } +tokio = { git = "https://github.com/tokio-rs/tokio" } +#postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index c1999b142..bf210c269 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -41,20 +41,22 @@ //! 
``` #![doc(html_root_url = "https://docs.rs/postgres-openssl/0.2.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] +#![feature(async_await)] -use futures::{try_ready, Async, Future, Poll}; #[cfg(feature = "runtime")] use openssl::error::ErrorStack; use openssl::hash::MessageDigest; use openssl::nid::Nid; #[cfg(feature = "runtime")] use openssl::ssl::SslConnector; -use openssl::ssl::{ConnectConfiguration, HandshakeError, SslRef}; +use openssl::ssl::{ConnectConfiguration, SslRef}; use std::fmt::Debug; +use std::future::Future; +use std::pin::Pin; #[cfg(feature = "runtime")] use std::sync::Arc; use tokio_io::{AsyncRead, AsyncWrite}; -use tokio_openssl::{ConnectAsync, ConnectConfigurationExt, SslStream}; +use tokio_openssl::{HandshakeError, SslStream}; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; @@ -96,7 +98,7 @@ impl MakeTlsConnector { #[cfg(feature = "runtime")] impl MakeTlsConnect for MakeTlsConnector where - S: AsyncRead + AsyncWrite + Debug + 'static + Sync + Send, + S: AsyncRead + AsyncWrite + Unpin + Debug + 'static + Sync + Send, { type Stream = SslStream; type TlsConnect = TlsConnector; @@ -127,36 +129,27 @@ impl TlsConnector { impl TlsConnect for TlsConnector where - S: AsyncRead + AsyncWrite + Debug + 'static + Sync + Send, + S: AsyncRead + AsyncWrite + Unpin + Debug + 'static + Sync + Send, { type Stream = SslStream; type Error = HandshakeError; - type Future = TlsConnectFuture; + type Future = Pin< + Box, ChannelBinding), HandshakeError>> + Send>, + >; - fn connect(self, stream: S) -> TlsConnectFuture { - TlsConnectFuture(self.ssl.connect_async(&self.domain, stream)) - } -} - -/// The future returned by `TlsConnector`. -pub struct TlsConnectFuture(ConnectAsync); - -impl Future for TlsConnectFuture -where - S: AsyncRead + AsyncWrite + Debug + 'static + Sync + Send, -{ - type Item = (SslStream, ChannelBinding); - type Error = HandshakeError; + fn connect(self, stream: S) -> Self::Future { + let future = async move { + let stream = tokio_openssl::connect(self.ssl, &self.domain, stream).await?; - fn poll(&mut self) -> Poll<(SslStream, ChannelBinding), HandshakeError> { - let stream = try_ready!(self.0.poll()); + let channel_binding = match tls_server_end_point(stream.ssl()) { + Some(buf) => ChannelBinding::tls_server_end_point(buf), + None => ChannelBinding::none(), + }; - let channel_binding = match tls_server_end_point(stream.get_ref().ssl()) { - Some(buf) => ChannelBinding::tls_server_end_point(buf), - None => ChannelBinding::none(), + Ok((stream, channel_binding)) }; - Ok(Async::Ready((stream, channel_binding))) + Box::pin(future) } } diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index c3923a33b..927651c69 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -1,90 +1,95 @@ -use futures::{Future, Stream}; +use futures::{FutureExt, TryStreamExt}; use openssl::ssl::{SslConnector, SslMethod}; use tokio::net::TcpStream; -use tokio::runtime::current_thread::Runtime; use tokio_postgres::tls::TlsConnect; use super::*; -fn smoke_test(s: &str, tls: T) +async fn smoke_test(s: &str, tls: T) where T: TlsConnect, - T::Stream: 'static, + T::Stream: 'static + Send, { - let mut runtime = Runtime::new().unwrap(); + let stream = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + .await + .unwrap(); let builder = s.parse::().unwrap(); + let (mut client, connection) = builder.connect_raw(stream, tls).await.unwrap(); - let handshake = 
TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - .map_err(|e| panic!("{}", e)) - .and_then(|s| builder.connect_raw(s, tls)); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); + let connection = connection.map(|r| r.unwrap()); + tokio::spawn(connection); - let prepare = client.prepare("SELECT 1::INT4"); - let statement = runtime.block_on(prepare).unwrap(); - let select = client.query(&statement, &[]).collect().map(|rows| { - assert_eq!(rows.len(), 1); - assert_eq!(rows[0].get::<_, i32>(0), 1); - }); - runtime.block_on(select).unwrap(); + let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); + let rows = client + .query(&stmt, &[&1i32]) + .try_collect::>() + .await + .unwrap(); - drop(statement); - drop(client); - runtime.run().unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); } -#[test] -fn require() { +#[tokio::test] +async fn require() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); builder.set_ca_file("../test/server.crt").unwrap(); let ctx = builder.build(); smoke_test( "user=ssl_user dbname=postgres sslmode=require", TlsConnector::new(ctx.configure().unwrap(), "localhost"), - ); + ) + .await; } -#[test] -fn prefer() { +#[tokio::test] +async fn prefer() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); builder.set_ca_file("../test/server.crt").unwrap(); let ctx = builder.build(); smoke_test( "user=ssl_user dbname=postgres", TlsConnector::new(ctx.configure().unwrap(), "localhost"), - ); + ) + .await; } -#[test] -fn scram_user() { +#[tokio::test] +async fn scram_user() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); builder.set_ca_file("../test/server.crt").unwrap(); let ctx = builder.build(); smoke_test( "user=scram_user password=password dbname=postgres sslmode=require", TlsConnector::new(ctx.configure().unwrap(), "localhost"), - ); + ) + .await; } -#[test] +#[tokio::test] #[cfg(feature = "runtime")] -fn runtime() { - let mut runtime = Runtime::new().unwrap(); - +async fn runtime() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); builder.set_ca_file("../test/server.crt").unwrap(); let connector = MakeTlsConnector::new(builder.build()); - let connect = tokio_postgres::connect( + let (mut client, connection) = tokio_postgres::connect( "host=localhost port=5433 user=postgres sslmode=require", connector, - ); - let (mut client, connection) = runtime.block_on(connect).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); + ) + .await + .unwrap(); + let connection = connection.map(|r| r.unwrap()); + tokio::spawn(connection); + + let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); + let rows = client + .query(&stmt, &[&1i32]) + .try_collect::>() + .await + .unwrap(); - let execute = client.simple_query("SELECT 1").for_each(|_| Ok(())); - runtime.block_on(execute).unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); } From 59923bd4255e8012f0cdddb6773bcab9302ccda0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 2 Aug 2019 21:03:54 -0700 Subject: [PATCH 219/819] Update postgres-native-tls --- postgres-native-tls/Cargo.toml | 10 ++-- postgres-native-tls/src/lib.rs | 39 ++++++--------- postgres-native-tls/src/test.rs | 88 +++++++++++++++++---------------- 3 files changed, 64 insertions(+), 73 deletions(-) diff --git a/postgres-native-tls/Cargo.toml 
b/postgres-native-tls/Cargo.toml index 7d891acd3..a204d54c4 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures = "0.1" +futures-preview = "0.3.0-alpha.17" native-tls = "0.2" -tokio-io = "0.1" -tokio-tls = "0.2.1" +tokio-io = { git = "https://github.com/tokio-rs/tokio" } +tokio-tls = { git = "https://github.com/tokio-rs/tokio" } tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "0.1.7" -postgres = { version = "0.16.0-rc.1", path = "../postgres" } +tokio = { git = "https://github.com/tokio-rs/tokio" } +#postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index a68130380..32809f59c 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -47,13 +47,15 @@ //! ``` #![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.2.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] +#![feature(async_await)] -use futures::{try_ready, Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; -use tokio_tls::{Connect, TlsStream}; +use tokio_tls::TlsStream; +use std::pin::Pin; +use std::future::Future; #[cfg(test)] mod test; @@ -76,7 +78,7 @@ impl MakeTlsConnector { #[cfg(feature = "runtime")] impl MakeTlsConnect for MakeTlsConnector where - S: AsyncRead + AsyncWrite, + S: AsyncRead + AsyncWrite + Unpin + 'static + Send, { type Stream = TlsStream; type TlsConnect = TlsConnector; @@ -105,35 +107,22 @@ impl TlsConnector { impl TlsConnect for TlsConnector where - S: AsyncRead + AsyncWrite, + S: AsyncRead + AsyncWrite + Unpin + 'static + Send, { type Stream = TlsStream; type Error = native_tls::Error; - type Future = TlsConnectFuture; + type Future = Pin, ChannelBinding), native_tls::Error>> + Send>>; - fn connect(self, stream: S) -> TlsConnectFuture { - TlsConnectFuture(self.connector.connect(&self.domain, stream)) - } -} - -/// The future returned by `TlsConnector`. 
-pub struct TlsConnectFuture(Connect); - -impl Future for TlsConnectFuture -where - S: AsyncRead + AsyncWrite, -{ - type Item = (TlsStream, ChannelBinding); - type Error = native_tls::Error; + fn connect(self, stream: S) -> Self::Future { + let future = async move { + let stream = self.connector.connect(&self.domain, stream).await?; - fn poll(&mut self) -> Poll<(TlsStream, ChannelBinding), native_tls::Error> { - let stream = try_ready!(self.0.poll()); + // FIXME https://github.com/tokio-rs/tokio/issues/1383 + let channel_binding = ChannelBinding::none(); - let channel_binding = match stream.get_ref().tls_server_end_point().unwrap_or(None) { - Some(buf) => ChannelBinding::tls_server_end_point(buf), - None => ChannelBinding::none(), + Ok((stream, channel_binding)) }; - Ok(Async::Ready((stream, channel_binding))) + Box::pin(future) } } diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 43aab31f7..81f93398f 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -1,44 +1,40 @@ -use futures::{Future, Stream}; use native_tls::{self, Certificate}; use tokio::net::TcpStream; -use tokio::runtime::current_thread::Runtime; use tokio_postgres::tls::TlsConnect; +use futures::{FutureExt, TryStreamExt}; #[cfg(feature = "runtime")] use crate::MakeTlsConnector; use crate::TlsConnector; -fn smoke_test(s: &str, tls: T) -where - T: TlsConnect, - T::Stream: 'static, +async fn smoke_test(s: &str, tls: T) + where + T: TlsConnect, + T::Stream: 'static + Send, { - let mut runtime = Runtime::new().unwrap(); + let stream = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + .await + .unwrap(); let builder = s.parse::().unwrap(); + let (mut client, connection) = builder.connect_raw(stream, tls).await.unwrap(); - let handshake = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - .map_err(|e| panic!("{}", e)) - .and_then(|s| builder.connect_raw(s, tls)); - let (mut client, connection) = runtime.block_on(handshake).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); + let connection = connection.map(|r| r.unwrap()); + tokio::spawn(connection); - let prepare = client.prepare("SELECT 1::INT4"); - let statement = runtime.block_on(prepare).unwrap(); - let select = client.query(&statement, &[]).collect().map(|rows| { - assert_eq!(rows.len(), 1); - assert_eq!(rows[0].get::<_, i32>(0), 1); - }); - runtime.block_on(select).unwrap(); + let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); + let rows = client + .query(&stmt, &[&1i32]) + .try_collect::>() + .await + .unwrap(); - drop(statement); - drop(client); - runtime.run().unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); } -#[test] -fn require() { +#[tokio::test] +async fn require() { let connector = native_tls::TlsConnector::builder() .add_root_certificate( Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), @@ -48,11 +44,11 @@ fn require() { smoke_test( "user=ssl_user dbname=postgres sslmode=require", TlsConnector::new(connector, "localhost"), - ); + ).await; } -#[test] -fn prefer() { +#[tokio::test] +async fn prefer() { let connector = native_tls::TlsConnector::builder() .add_root_certificate( Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), @@ -62,11 +58,11 @@ fn prefer() { smoke_test( "user=ssl_user dbname=postgres", TlsConnector::new(connector, "localhost"), - ); + ).await; } -#[test] -fn scram_user() { +#[tokio::test] +async fn scram_user() { let connector = 
native_tls::TlsConnector::builder() .add_root_certificate( Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), @@ -76,14 +72,12 @@ fn scram_user() { smoke_test( "user=scram_user password=password dbname=postgres sslmode=require", TlsConnector::new(connector, "localhost"), - ); + ).await; } -#[test] +#[tokio::test] #[cfg(feature = "runtime")] -fn runtime() { - let mut runtime = Runtime::new().unwrap(); - +async fn runtime() { let connector = native_tls::TlsConnector::builder() .add_root_certificate( Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), @@ -92,14 +86,22 @@ fn runtime() { .unwrap(); let connector = MakeTlsConnector::new(connector); - let connect = tokio_postgres::connect( + let (mut client, connection) = tokio_postgres::connect( "host=localhost port=5433 user=postgres sslmode=require", connector, - ); - let (mut client, connection) = runtime.block_on(connect).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.spawn(connection); + ) + .await + .unwrap(); + let connection = connection.map(|r| r.unwrap()); + tokio::spawn(connection); + + let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); + let rows = client + .query(&stmt, &[&1i32]) + .try_collect::>() + .await + .unwrap(); - let execute = client.simple_query("SELECT 1").for_each(|_| Ok(())); - runtime.block_on(execute).unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); } From f5a8b1de68c2410b697e59bcbeba77793427e86e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 3 Aug 2019 18:09:27 -0700 Subject: [PATCH 220/819] Update postgres --- postgres/Cargo.toml | 9 +- postgres/src/client.rs | 64 ++++++-------- postgres/src/config.rs | 69 ++++++++------- postgres/src/copy_in_stream.rs | 25 ++++++ postgres/src/copy_out_reader.rs | 36 +++++--- postgres/src/iter.rs | 45 ++++++++++ postgres/src/lib.rs | 10 +-- postgres/src/query_iter.rs | 38 --------- postgres/src/query_portal_iter.rs | 38 --------- postgres/src/simple_query_iter.rs | 38 --------- postgres/src/test.rs | 21 +++++ postgres/src/to_statement.rs | 33 +++++++- postgres/src/transaction.rs | 134 ++++++++++++------------------ tokio-postgres/src/client.rs | 7 ++ tokio-postgres/src/config.rs | 2 +- tokio-postgres/src/connection.rs | 5 ++ 16 files changed, 283 insertions(+), 291 deletions(-) create mode 100644 postgres/src/copy_in_stream.rs create mode 100644 postgres/src/iter.rs delete mode 100644 postgres/src/query_iter.rs delete mode 100644 postgres/src/query_portal_iter.rs delete mode 100644 postgres/src/simple_query_iter.rs diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 5200f788b..3e24504fd 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -30,9 +30,14 @@ runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] [dependencies] bytes = "0.4" fallible-iterator = "0.2" -futures = "0.1" +futures-preview = "0.3.0-alpha.17" +pin-utils = "0.1.0-alpha.4" tokio-postgres = { version = "0.4.0-rc.2", path = "../tokio-postgres", default-features = false } +tokio-executor = { git = "https://github.com/tokio-rs/tokio" } -tokio = { version = "0.1", optional = true } +tokio = { git = "https://github.com/tokio-rs/tokio", optional = true } lazy_static = { version = "1.0", optional = true } log = { version = "0.4", optional = true } + +[dev-dependencies] +tokio = { git = "https://github.com/tokio-rs/tokio" } diff --git a/postgres/src/client.rs b/postgres/src/client.rs index c811e74d1..e4a1e3820 100644 --- a/postgres/src/client.rs +++ 
b/postgres/src/client.rs @@ -1,15 +1,18 @@ use fallible_iterator::FallibleIterator; -use futures::{Async, Future, Poll, Stream}; -use std::io::{self, Read}; +use futures::executor; +use std::io::{BufRead, Read}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; #[cfg(feature = "runtime")] use tokio_postgres::Socket; use tokio_postgres::{Error, Row, SimpleQueryMessage}; +use crate::copy_in_stream::CopyInStream; +use crate::copy_out_reader::CopyOutReader; +use crate::iter::Iter; #[cfg(feature = "runtime")] use crate::Config; -use crate::{CopyOutReader, QueryIter, SimpleQueryIter, Statement, ToStatement, Transaction}; +use crate::{Statement, ToStatement, Transaction}; /// A synchronous PostgreSQL client. /// @@ -82,7 +85,7 @@ impl Client { T: ?Sized + ToStatement, { let statement = query.__statement(self)?; - self.0.execute(&statement, params).wait() + executor::block_on(self.0.execute(&statement, params)) } /// Executes a statement, returning the resulting rows. @@ -149,16 +152,16 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn query_iter( - &mut self, + pub fn query_iter<'a, T>( + &'a mut self, query: &T, params: &[&dyn ToSql], - ) -> Result, Error> + ) -> Result + 'a, Error> where T: ?Sized + ToStatement, { let statement = query.__statement(self)?; - Ok(QueryIter::new(self.0.query(&statement, params))) + Ok(Iter::new(self.0.query(&statement, params))) } /// Creates a new prepared statement. @@ -185,7 +188,7 @@ impl Client { /// # } /// ``` pub fn prepare(&mut self, query: &str) -> Result { - self.0.prepare(query).wait() + executor::block_on(self.0.prepare(query)) } /// Like `prepare`, but allows the types of query parameters to be explicitly specified. @@ -216,7 +219,7 @@ impl Client { /// # } /// ``` pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - self.0.prepare_typed(query, types).wait() + executor::block_on(self.0.prepare_typed(query, types)) } /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. @@ -244,12 +247,10 @@ impl Client { ) -> Result where T: ?Sized + ToStatement, - R: Read, + R: Read + Unpin, { let statement = query.__statement(self)?; - self.0 - .copy_in(&statement, params, CopyInStream(reader)) - .wait() + executor::block_on(self.0.copy_in(&statement, params, CopyInStream(reader))) } /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data. @@ -271,11 +272,11 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn copy_out( - &mut self, + pub fn copy_out<'a, T>( + &'a mut self, query: &T, params: &[&dyn ToSql], - ) -> Result, Error> + ) -> Result where T: ?Sized + ToStatement, { @@ -311,8 +312,11 @@ impl Client { /// Prepared statements should be use for any query which contains user-specified data, as they provided the /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass /// them to this method! - pub fn simple_query_iter(&mut self, query: &str) -> Result, Error> { - Ok(SimpleQueryIter::new(self.0.simple_query(query))) + pub fn simple_query_iter<'a>( + &'a mut self, + query: &str, + ) -> Result + 'a, Error> { + Ok(Iter::new(self.0.simple_query(query))) } /// Begins a new database transaction. 
@@ -336,8 +340,8 @@ impl Client { /// # } /// ``` pub fn transaction(&mut self) -> Result, Error> { - self.simple_query("BEGIN")?; - Ok(Transaction::new(self)) + let transaction = executor::block_on(self.0.transaction())?; + Ok(Transaction::new(transaction)) } /// Determines if the client's connection has already closed. @@ -368,21 +372,3 @@ impl From for Client { Client(c) } } - -struct CopyInStream(R); - -impl Stream for CopyInStream -where - R: Read, -{ - type Item = Vec; - type Error = io::Error; - - fn poll(&mut self) -> Poll>, io::Error> { - let mut buf = vec![]; - match self.0.by_ref().take(4096).read_to_end(&mut buf)? { - 0 => Ok(Async::Ready(None)), - _ => Ok(Async::Ready(Some(buf))), - } - } -} diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b04c03f87..2c2fa6554 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -2,25 +2,22 @@ //! //! Requires the `runtime` Cargo feature (enabled by default). -use futures::future::Executor; -use futures::sync::oneshot; -use futures::Future; +use futures::FutureExt; use log::error; use std::fmt; use std::path::Path; use std::str::FromStr; -use std::sync::Arc; +use std::sync::{mpsc, Arc, Mutex}; use std::time::Duration; +use tokio_executor::Executor; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; #[doc(inline)] -use tokio_postgres::config::{SslMode, TargetSessionAttrs}; +pub use tokio_postgres::config::{SslMode, TargetSessionAttrs}; use crate::{Client, RUNTIME}; -type DynExecutor = dyn Executor + Send>> + Sync + Send; - /// Connection configuration. /// /// Configuration can be parsed from libpq-style connection strings. These strings come in two formats: @@ -98,7 +95,7 @@ type DynExecutor = dyn Executor + Send>> + pub struct Config { config: tokio_postgres::Config, // this is an option since we don't want to boot up our default runtime unless we're actually going to use it. - executor: Option>, + executor: Option>>, } impl fmt::Debug for Config { @@ -242,44 +239,52 @@ impl Config { /// Defaults to a postgres-specific tokio `Runtime`. pub fn executor(&mut self, executor: E) -> &mut Config where - E: Executor + Send>> + 'static + Sync + Send, + E: Executor + 'static + Sync + Send, { - self.executor = Some(Arc::new(executor)); + self.executor = Some(Arc::new(Mutex::new(executor))); self } /// Opens a connection to a PostgreSQL database. - pub fn connect(&self, tls_mode: T) -> Result + pub fn connect(&self, tls: T) -> Result where T: MakeTlsConnect + 'static + Send, T::TlsConnect: Send, T::Stream: Send, >::Future: Send, { - let (tx, rx) = oneshot::channel(); - let connect = self - .config - .connect(tls_mode) - .then(|r| tx.send(r).map_err(|_| ())); - self.with_executor(|e| e.execute(Box::new(connect))) - .unwrap(); - let (client, connection) = rx.wait().unwrap()?; - - let connection = connection.map_err(|e| error!("postgres connection error: {}", e)); - self.with_executor(|e| e.execute(Box::new(connection))) - .unwrap(); - - Ok(Client::from(client)) - } + let (client, connection) = match &self.executor { + Some(executor) => { + let (tx, rx) = mpsc::channel(); + let config = self.config.clone(); + let connect = async move { + let r = config.connect(tls).await; + let _ = tx.send(r); + }; + executor.lock().unwrap().spawn(Box::pin(connect)).unwrap(); + rx.recv().unwrap()? + } + None => { + let connect = self.config.connect(tls); + RUNTIME.block_on(connect)? 
+            }
+        };
 
-    fn with_executor<F, T>(&self, f: F) -> T
-    where
-        F: FnOnce(&dyn Executor<Box<dyn Future<Item = (), Error = ()> + Send>>) -> T,
-    {
+        let connection = connection.map(|r| {
+            if let Err(e) = r {
+                error!("postgres connection error: {}", e)
+            }
+        });
         match &self.executor {
-            Some(e) => f(&**e),
-            None => f(&RUNTIME.executor()),
+            Some(executor) => {
+                executor.lock().unwrap().spawn(Box::pin(connection)).unwrap();
+            },
+            None => {
+                RUNTIME.spawn(connection);
+            }
         }
+
+        Ok(Client::from(client))
     }
 }
diff --git a/postgres/src/copy_in_stream.rs b/postgres/src/copy_in_stream.rs
new file mode 100644
index 000000000..0dc2f0bbb
--- /dev/null
+++ b/postgres/src/copy_in_stream.rs
@@ -0,0 +1,25 @@
+use futures::Stream;
+use std::io;
+use std::io::Read;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pub struct CopyInStream<R>(pub R);
+
+impl<R> Stream for CopyInStream<R>
+where
+    R: Read + Unpin,
+{
+    type Item = io::Result<Vec<u8>>;
+
+    fn poll_next(
+        mut self: Pin<&mut Self>,
+        _: &mut Context<'_>,
+    ) -> Poll<Option<io::Result<Vec<u8>>>> {
+        let mut buf = vec![];
+        match self.0.by_ref().take(4096).read_to_end(&mut buf)? {
+            0 => Poll::Ready(None),
+            _ => Poll::Ready(Some(Ok(buf))),
+        }
+    }
+}
diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs
index ff0c30a24..680d4d318 100644
--- a/postgres/src/copy_out_reader.rs
+++ b/postgres/src/copy_out_reader.rs
@@ -1,26 +1,34 @@
 use bytes::{Buf, Bytes};
-use futures::stream::{self, Stream};
+use futures::{executor, Stream};
 use std::io::{self, BufRead, Cursor, Read};
 use std::marker::PhantomData;
-use tokio_postgres::impls;
+use std::pin::Pin;
 use tokio_postgres::Error;
 
 /// The reader returned by the `copy_out` method.
-pub struct CopyOutReader<'a> {
-    it: stream::Wait<impls::CopyOut>,
+pub struct CopyOutReader<'a, S>
+where
+    S: Stream,
+{
+    it: executor::BlockingStream<Pin<Box<S>>>,
     cur: Cursor<Bytes>,
     _p: PhantomData<&'a mut ()>,
 }
 
 // no-op impl to extend borrow until drop
-impl<'a> Drop for CopyOutReader<'a> {
+impl<'a, S> Drop for CopyOutReader<'a, S>
+where
+    S: Stream,
+{
     fn drop(&mut self) {}
 }
 
-impl<'a> CopyOutReader<'a> {
-    #[allow(clippy::new_ret_no_self)]
-    pub(crate) fn new(stream: impls::CopyOut) -> Result<CopyOutReader<'a>, Error> {
-        let mut it = stream.wait();
+impl<'a, S> CopyOutReader<'a, S>
+where
+    S: Stream<Item = Result<Bytes, Error>>,
+{
+    pub(crate) fn new(stream: S) -> Result<CopyOutReader<'a, S>, Error> {
+        let mut it = executor::block_on_stream(Box::pin(stream));
         let cur = match it.next() {
             Some(Ok(cur)) => cur,
             Some(Err(e)) => return Err(e),
@@ -35,7 +43,10 @@
     }
 }
 
-impl<'a> Read for CopyOutReader<'a> {
+impl<'a, S> Read for CopyOutReader<'a, S>
+where
+    S: Stream<Item = Result<Bytes, Error>>,
+{
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         let b = self.fill_buf()?;
         let len = usize::min(buf.len(), b.len());
@@ -45,7 +56,10 @@
     }
 }
 
-impl<'a> BufRead for CopyOutReader<'a> {
+impl<'a, S> BufRead for CopyOutReader<'a, S>
+where
+    S: Stream<Item = Result<Bytes, Error>>,
+{
     fn fill_buf(&mut self) -> io::Result<&[u8]> {
         if self.cur.remaining() == 0 {
             match self.it.next() {
diff --git a/postgres/src/iter.rs b/postgres/src/iter.rs
new file mode 100644
index 000000000..1f3ffc962
--- /dev/null
+++ b/postgres/src/iter.rs
@@ -0,0 +1,45 @@
+use fallible_iterator::FallibleIterator;
+use futures::executor::{self, BlockingStream};
+use futures::Stream;
+use std::marker::PhantomData;
+use std::pin::Pin;
+
+pub struct Iter<'a, S>
+where
+    S: Stream,
+{
+    it: BlockingStream<Pin<Box<S>>>,
+    _p: PhantomData<&'a mut ()>,
+}
+
+// no-op impl to extend the borrow until drop
+impl<'a, S> Drop for Iter<'a, S>
+where
+    S: Stream,
+{
+    fn drop(&mut self) {}
+}
+
+impl<'a, S> Iter<'a, S>
+where
+    S: Stream,
+{
+    pub fn new(stream: S) -> Iter<'a, S> {
+        Iter {
+            it: executor::block_on_stream(Box::pin(stream)),
+            _p: PhantomData,
+        }
+    }
+}
+
+impl<'a, S, T, E> FallibleIterator for Iter<'a, S>
+where
+    S: Stream<Item = Result<T, E>>,
+{
+    type Item = T;
+    type Error = E;
+
+    fn next(&mut self) -> Result<Option<T>, E> {
+        self.it.next().transpose()
+    }
+}
diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs
index a96eb1684..3b5ee03e8 100644
--- a/postgres/src/lib.rs
+++ b/postgres/src/lib.rs
@@ -54,6 +54,7 @@
 //! crates, respectively.
 #![doc(html_root_url = "https://docs.rs/postgres/0.16.0-rc.2")]
 #![warn(clippy::all, rust_2018_idioms, missing_docs)]
+#![feature(async_await)]
 
 #[cfg(feature = "runtime")]
 use lazy_static::lazy_static;
@@ -69,14 +70,10 @@ pub use tokio_postgres::{
 pub use crate::client::*;
 #[cfg(feature = "runtime")]
 pub use crate::config::Config;
-pub use crate::copy_out_reader::*;
 #[doc(no_inline)]
 pub use crate::error::Error;
-pub use crate::query_iter::*;
-pub use crate::query_portal_iter::*;
 #[doc(no_inline)]
 pub use crate::row::{Row, SimpleQueryRow};
-pub use crate::simple_query_iter::*;
 #[doc(no_inline)]
 pub use crate::tls::NoTls;
 pub use crate::to_statement::*;
@@ -85,10 +82,9 @@ pub use crate::transaction::*;
 mod client;
 #[cfg(feature = "runtime")]
 pub mod config;
+mod copy_in_stream;
 mod copy_out_reader;
-mod query_iter;
-mod query_portal_iter;
-mod simple_query_iter;
+mod iter;
 mod to_statement;
 mod transaction;
diff --git a/postgres/src/query_iter.rs b/postgres/src/query_iter.rs
deleted file mode 100644
index 8f9a50598..000000000
--- a/postgres/src/query_iter.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-use fallible_iterator::FallibleIterator;
-use futures::stream::{self, Stream};
-use std::marker::PhantomData;
-use tokio_postgres::impls;
-use tokio_postgres::{Error, Row};
-
-/// The iterator returned by the `query_iter` method.
-pub struct QueryIter<'a> {
-    it: stream::Wait<impls::Query>,
-    _p: PhantomData<&'a mut ()>,
-}
-
-// no-op impl to extend the borrow until drop
-impl<'a> Drop for QueryIter<'a> {
-    fn drop(&mut self) {}
-}
-
-impl<'a> QueryIter<'a> {
-    pub(crate) fn new(stream: impls::Query) -> QueryIter<'a> {
-        QueryIter {
-            it: stream.wait(),
-            _p: PhantomData,
-        }
-    }
-}
-
-impl<'a> FallibleIterator for QueryIter<'a> {
-    type Item = Row;
-    type Error = Error;
-
-    fn next(&mut self) -> Result<Option<Row>, Error> {
-        match self.it.next() {
-            Some(Ok(row)) => Ok(Some(row)),
-            Some(Err(e)) => Err(e),
-            None => Ok(None),
-        }
-    }
-}
diff --git a/postgres/src/query_portal_iter.rs b/postgres/src/query_portal_iter.rs
deleted file mode 100644
index 8fab34865..000000000
--- a/postgres/src/query_portal_iter.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-use fallible_iterator::FallibleIterator;
-use futures::stream::{self, Stream};
-use std::marker::PhantomData;
-use tokio_postgres::impls;
-use tokio_postgres::{Error, Row};
-
-/// The iterator returned by the `query_portal_iter` method.
-pub struct QueryPortalIter<'a> { - it: stream::Wait, - _p: PhantomData<&'a mut ()>, -} - -// no-op impl to extend the borrow until drop -impl<'a> Drop for QueryPortalIter<'a> { - fn drop(&mut self) {} -} - -impl<'a> QueryPortalIter<'a> { - pub(crate) fn new(stream: impls::QueryPortal) -> QueryPortalIter<'a> { - QueryPortalIter { - it: stream.wait(), - _p: PhantomData, - } - } -} - -impl<'a> FallibleIterator for QueryPortalIter<'a> { - type Item = Row; - type Error = Error; - - fn next(&mut self) -> Result, Error> { - match self.it.next() { - Some(Ok(row)) => Ok(Some(row)), - Some(Err(e)) => Err(e), - None => Ok(None), - } - } -} diff --git a/postgres/src/simple_query_iter.rs b/postgres/src/simple_query_iter.rs deleted file mode 100644 index 3053cd30b..000000000 --- a/postgres/src/simple_query_iter.rs +++ /dev/null @@ -1,38 +0,0 @@ -use fallible_iterator::FallibleIterator; -use futures::stream::{self, Stream}; -use std::marker::PhantomData; -use tokio_postgres::impls; -use tokio_postgres::{Error, SimpleQueryMessage}; - -/// The iterator returned by the `simple_query_iter` method. -pub struct SimpleQueryIter<'a> { - it: stream::Wait, - _p: PhantomData<&'a mut ()>, -} - -// no-op impl to extend borrow until drop -impl<'a> Drop for SimpleQueryIter<'a> { - fn drop(&mut self) {} -} - -impl<'a> SimpleQueryIter<'a> { - pub(crate) fn new(stream: impls::SimpleQuery) -> SimpleQueryIter<'a> { - SimpleQueryIter { - it: stream.wait(), - _p: PhantomData, - } - } -} - -impl<'a> FallibleIterator for SimpleQueryIter<'a> { - type Item = SimpleQueryMessage; - type Error = Error; - - fn next(&mut self) -> Result, Error> { - match self.it.next() { - Some(Ok(row)) => Ok(Some(row)), - Some(Err(e)) => Err(e), - None => Ok(None), - } - } -} diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 0ff766cbb..06399a19a 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,4 +1,5 @@ use std::io::Read; +use tokio::runtime::Runtime; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -95,6 +96,7 @@ fn transaction_drop() { assert_eq!(rows.len(), 0); } +/* #[test] fn nested_transactions() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); @@ -145,6 +147,7 @@ fn nested_transactions() { assert_eq!(rows[1].get::<_, i32>(0), 3); assert_eq!(rows[2].get::<_, i32>(0), 4); } +*/ #[test] fn copy_in() { @@ -222,3 +225,21 @@ fn portal() { assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 3); } + +#[test] +fn custom_executor() { + let runtime = Runtime::new().unwrap(); + let mut config = "host=localhost port=5433 user=postgres" + .parse::() + .unwrap(); + config.executor(runtime.executor()); + + let mut client = config.connect(NoTls).unwrap(); + + let rows = client.query("SELECT $1::TEXT", &[&"hello"]).unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, &str>(0), "hello"); + + drop(runtime); + assert!(client.is_closed()); +} diff --git a/postgres/src/to_statement.rs b/postgres/src/to_statement.rs index 5cbe56a38..a77ad28ac 100644 --- a/postgres/src/to_statement.rs +++ b/postgres/src/to_statement.rs @@ -1,11 +1,28 @@ use tokio_postgres::Error; -use crate::{Client, Statement}; +use crate::{Client, Statement, Transaction}; mod sealed { pub trait Sealed {} } +#[doc(hidden)] +pub trait Prepare { + fn prepare(&mut self, query: &str) -> Result; +} + +impl Prepare for Client { + fn prepare(&mut self, query: &str) -> Result { + self.prepare(query) + } +} + +impl<'a> Prepare for Transaction<'a> { + fn prepare(&mut self, query: &str) -> 
Result { + self.prepare(query) + } +} + /// A trait abstracting over prepared and unprepared statements. /// /// Many methods are generic over this bound, so that they support both a raw query string as well as a statement which @@ -14,13 +31,18 @@ mod sealed { /// This trait is "sealed" and cannot be implemented by anything outside this crate. pub trait ToStatement: sealed::Sealed { #[doc(hidden)] - fn __statement(&self, client: &mut Client) -> Result; + fn __statement(&self, client: &mut T) -> Result + where + T: Prepare; } impl sealed::Sealed for str {} impl ToStatement for str { - fn __statement(&self, client: &mut Client) -> Result { + fn __statement(&self, client: &mut T) -> Result + where + T: Prepare, + { client.prepare(self) } } @@ -28,7 +50,10 @@ impl ToStatement for str { impl sealed::Sealed for Statement {} impl ToStatement for Statement { - fn __statement(&self, _: &mut Client) -> Result { + fn __statement(&self, _: &mut T) -> Result + where + T: Prepare, + { Ok(self.clone()) } } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 8850baee8..fcf7ce78c 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,79 +1,45 @@ use fallible_iterator::FallibleIterator; -use futures::Future; -use std::io::Read; +use futures::executor; +use std::io::{BufRead, Read}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; -use crate::{ - Client, CopyOutReader, Portal, QueryIter, QueryPortalIter, SimpleQueryIter, Statement, - ToStatement, -}; +use crate::copy_in_stream::CopyInStream; +use crate::copy_out_reader::CopyOutReader; +use crate::iter::Iter; +use crate::{Portal, Statement, ToStatement}; /// A representation of a PostgreSQL database transaction. /// /// Transactions will implicitly roll back by default when dropped. Use the `commit` method to commit the changes made /// in the transaction. Transactions can be nested, with inner transactions implemented via safepoints. -pub struct Transaction<'a> { - client: &'a mut Client, - depth: u32, - done: bool, -} - -impl<'a> Drop for Transaction<'a> { - fn drop(&mut self) { - if !self.done { - let _ = self.rollback_inner(); - } - } -} +pub struct Transaction<'a>(tokio_postgres::Transaction<'a>); impl<'a> Transaction<'a> { - pub(crate) fn new(client: &'a mut Client) -> Transaction<'a> { - Transaction { - client, - depth: 0, - done: false, - } + pub(crate) fn new(transaction: tokio_postgres::Transaction<'a>) -> Transaction<'a> { + Transaction(transaction) } /// Consumes the transaction, committing all changes made within it. - pub fn commit(mut self) -> Result<(), Error> { - self.done = true; - if self.depth == 0 { - self.client.simple_query("COMMIT")?; - } else { - self.client - .simple_query(&format!("RELEASE sp{}", self.depth))?; - } - Ok(()) + pub fn commit(self) -> Result<(), Error> { + executor::block_on(self.0.commit()) } /// Rolls the transaction back, discarding all changes made within it. /// /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. - pub fn rollback(mut self) -> Result<(), Error> { - self.done = true; - self.rollback_inner() - } - - fn rollback_inner(&mut self) -> Result<(), Error> { - if self.depth == 0 { - self.client.simple_query("ROLLBACK")?; - } else { - self.client - .simple_query(&format!("ROLLBACK TO sp{}", self.depth))?; - } - Ok(()) + pub fn rollback(self) -> Result<(), Error> { + executor::block_on(self.0.rollback()) } /// Like `Client::prepare`. 
pub fn prepare(&mut self, query: &str) -> Result { - self.client.prepare(query) + executor::block_on(self.0.prepare(query)) } /// Like `Client::prepare_typed`. pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - self.client.prepare_typed(query, types) + executor::block_on(self.0.prepare_typed(query, types)) } /// Like `Client::execute`. @@ -81,7 +47,8 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.client.execute(query, params) + let statement = query.__statement(self)?; + executor::block_on(self.0.execute(&statement, params)) } /// Like `Client::query`. @@ -89,7 +56,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.client.query(query, params) + self.query_iter(query, params)?.collect() } /// Like `Client::query_iter`. @@ -97,11 +64,12 @@ impl<'a> Transaction<'a> { &mut self, query: &T, params: &[&dyn ToSql], - ) -> Result, Error> + ) -> Result, Error> where T: ?Sized + ToStatement, { - self.client.query_iter(query, params) + let statement = query.__statement(self)?; + Ok(Iter::new(self.0.query(&statement, params))) } /// Binds parameters to a statement, creating a "portal". @@ -118,8 +86,8 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let statement = query.__statement(&mut self.client)?; - self.client.get_mut().bind(&statement, params).wait() + let statement = query.__statement(self)?; + executor::block_on(self.0.bind(&statement, params)) } /// Continues execution of a portal, returning the next set of rows. @@ -136,10 +104,8 @@ impl<'a> Transaction<'a> { &mut self, portal: &Portal, max_rows: i32, - ) -> Result, Error> { - Ok(QueryPortalIter::new( - self.client.get_mut().query_portal(&portal, max_rows), - )) + ) -> Result, Error> { + Ok(Iter::new(self.0.query_portal(&portal, max_rows))) } /// Like `Client::copy_in`. @@ -151,42 +117,48 @@ impl<'a> Transaction<'a> { ) -> Result where T: ?Sized + ToStatement, - R: Read, + R: Read + Unpin, { - self.client.copy_in(query, params, reader) + let statement = query.__statement(self)?; + executor::block_on(self.0.copy_in(&statement, params, CopyInStream(reader))) } /// Like `Client::copy_out`. - pub fn copy_out( - &mut self, + pub fn copy_out<'b, T>( + &'a mut self, query: &T, params: &[&dyn ToSql], - ) -> Result, Error> + ) -> Result where T: ?Sized + ToStatement, { - self.client.copy_out(query, params) + let statement = query.__statement(self)?; + let stream = self.0.copy_out(&statement, params); + CopyOutReader::new(stream) } /// Like `Client::simple_query`. pub fn simple_query(&mut self, query: &str) -> Result, Error> { - self.client.simple_query(query) + self.simple_query_iter(query)?.collect() } /// Like `Client::simple_query_iter`. - pub fn simple_query_iter(&mut self, query: &str) -> Result, Error> { - self.client.simple_query_iter(query) - } - - /// Like `Client::transaction`. - pub fn transaction(&mut self) -> Result, Error> { - let depth = self.depth + 1; - self.client - .simple_query(&format!("SAVEPOINT sp{}", depth))?; - Ok(Transaction { - client: self.client, - depth, - done: false, - }) - } + pub fn simple_query_iter<'b>( + &'b mut self, + query: &str, + ) -> Result + 'b, Error> { + Ok(Iter::new(self.0.simple_query(query))) + } + + // /// Like `Client::transaction`. 
+ // pub fn transaction(&mut self) -> Result, Error> { + // let depth = self.depth + 1; + // self.client + // .simple_query(&format!("SAVEPOINT sp{}", depth))?; + // Ok(Transaction { + // client: self.client, + // depth, + // done: false, + // }) + // } } diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 03f0d0314..ba43be6d8 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -370,4 +370,11 @@ impl Client { self.secret_key, ) } + + /// Determines if the connection to the server has already closed. + /// + /// In that case, all future queries will fail. + pub fn is_closed(&self) -> bool { + self.inner.sender.is_closed() + } } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 68fe44596..19df1a354 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -126,7 +126,7 @@ pub(crate) enum Host { /// ```not_rust /// postgresql:///mydb?user=user&host=/var/lib/postgresql /// ``` -#[derive(PartialEq)] +#[derive(PartialEq, Clone)] pub struct Config { pub(crate) user: Option, pub(crate) password: Option>, diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index c8d55a340..c3f90ef88 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -5,6 +5,7 @@ use crate::maybe_tls_stream::MaybeTlsStream; use crate::{AsyncMessage, Error, Notification}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; +use futures::stream::FusedStream; use futures::{ready, Sink, Stream, StreamExt}; use log::trace; use postgres_protocol::message::backend::Message; @@ -173,6 +174,10 @@ where return Poll::Ready(Some(messages)); } + if self.receiver.is_terminated() { + return Poll::Ready(None); + } + match self.receiver.poll_next_unpin(cx) { Poll::Ready(Some(request)) => { trace!("polled new request"); From 2311ceac411fc03a17fe306175b5392fa3161bcc Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 3 Aug 2019 18:11:22 -0700 Subject: [PATCH 221/819] Cleanup shutdown call --- tokio-postgres/src/cancel_query_raw.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index 08448ad62..d85b4281d 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -1,9 +1,7 @@ use crate::config::SslMode; use crate::tls::TlsConnect; use crate::{connect_tls, Error}; -use futures::future; use postgres_protocol::message::frontend; -use std::pin::Pin; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; pub async fn cancel_query_raw( @@ -24,9 +22,7 @@ where stream.write_all(&buf).await.map_err(Error::io)?; stream.flush().await.map_err(Error::io)?; - future::poll_fn(|cx| Pin::new(&mut stream).poll_shutdown(cx)) - .await - .map_err(Error::io)?; + stream.shutdown().await.map_err(Error::io)?; Ok(()) } From f07ebc73731a8b1e1a46176b7b0322cf26922cc5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 3 Aug 2019 18:25:28 -0700 Subject: [PATCH 222/819] Support nested transactions --- postgres/src/client.rs | 14 ++++++++++++ postgres/src/test.rs | 4 +--- postgres/src/transaction.rs | 21 +++++++++--------- tokio-postgres/src/transaction.rs | 36 ++++++++++++++++++++++++++++--- 4 files changed, 58 insertions(+), 17 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index e4a1e3820..bb53e0a7c 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -319,6 +319,20 @@ impl Client { 
         Ok(Iter::new(self.0.simple_query(query)))
     }
 
+    /// Executes a sequence of SQL statements using the simple query protocol.
+    ///
+    /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that
+    /// point. This is intended for use when, for example, initializing a database schema.
+    ///
+    /// # Warning
+    ///
+    /// Prepared statements should be used for any query which contains user-specified data, as they provide the
+    /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass
+    /// them to this method!
+    pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> {
+        executor::block_on(self.0.batch_execute(query))
+    }
+
     /// Begins a new database transaction.
     ///
     /// The transaction will roll back by default - use the `commit` method to commit it.
diff --git a/postgres/src/test.rs b/postgres/src/test.rs
index 06399a19a..59953e4e9 100644
--- a/postgres/src/test.rs
+++ b/postgres/src/test.rs
@@ -96,13 +96,12 @@ fn transaction_drop() {
     assert_eq!(rows.len(), 0);
 }
 
-/*
 #[test]
 fn nested_transactions() {
     let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap();
 
     client
-        .simple_query("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)")
+        .batch_execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)")
         .unwrap();
 
     let mut transaction = client.transaction().unwrap();
@@ -147,7 +146,6 @@ fn nested_transactions() {
     assert_eq!(rows[1].get::<_, i32>(0), 3);
     assert_eq!(rows[2].get::<_, i32>(0), 4);
 }
-*/
 
 #[test]
 fn copy_in() {
diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs
index fcf7ce78c..65ac5176f 100644
--- a/postgres/src/transaction.rs
+++ b/postgres/src/transaction.rs
@@ -150,15 +150,14 @@ impl<'a> Transaction<'a> {
         Ok(Iter::new(self.0.simple_query(query)))
     }
 
-    // /// Like `Client::transaction`.
-    // pub fn transaction(&mut self) -> Result<Transaction<'a>, Error> {
-    //     let depth = self.depth + 1;
-    //     self.client
-    //         .simple_query(&format!("SAVEPOINT sp{}", depth))?;
-    //     Ok(Transaction {
-    //         client: self.client,
-    //         depth,
-    //         done: false,
-    //     })
-    // }
+    /// Like `Client::batch_execute`.
+    pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> {
+        executor::block_on(self.0.batch_execute(query))
+    }
+
+    /// Like `Client::transaction`.
+    pub fn transaction(&mut self) -> Result<Transaction<'_>, Error> {
+        let transaction = executor::block_on(self.0.transaction())?;
+        Ok(Transaction(transaction))
+    }
 }
diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs
index c1271d217..0489f09f4 100644
--- a/tokio-postgres/src/transaction.rs
+++ b/tokio-postgres/src/transaction.rs
@@ -20,6 +20,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
 /// transaction. Transactions can be nested, with inner transactions implemented via safepoints.
 pub struct Transaction<'a> {
     client: &'a mut Client,
+    depth: u32,
     done: bool,
 }
 
@@ -30,7 +31,12 @@ impl<'a> Drop for Transaction<'a> {
         }
 
         let mut buf = vec![];
-        frontend::query("ROLLBACK", &mut buf).unwrap();
+        let query = if self.depth == 0 {
+            "ROLLBACK".to_string()
+        } else {
+            format!("ROLLBACK TO sp{}", self.depth)
+        };
+        frontend::query(&query, &mut buf).unwrap();
         let _ = self
             .client
             .inner()
@@ -42,6 +48,7 @@ impl<'a> Transaction<'a> {
     pub(crate) fn new(client: &'a mut Client) -> Transaction<'a> {
         Transaction {
             client,
+            depth: 0,
             done: false,
         }
     }
 
     /// Consumes the transaction, committing all changes made within it.
pub async fn commit(mut self) -> Result<(), Error> { self.done = true; - self.client.batch_execute("COMMIT").await + let query = if self.depth == 0 { + "COMMIT".to_string() + } else { + format!("RELEASE sp{}", self.depth) + }; + self.client.batch_execute(&query).await } /// Rolls the transaction back, discarding all changes made within it. @@ -57,7 +69,12 @@ impl<'a> Transaction<'a> { /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. pub async fn rollback(mut self) -> Result<(), Error> { self.done = true; - self.client.batch_execute("ROLLBACK").await + let query = if self.depth == 0 { + "ROLLBACK".to_string() + } else { + format!("ROLLBACK TO sp{}", self.depth) + }; + self.client.batch_execute(&query).await } /// Like `Client::prepare`. @@ -227,4 +244,17 @@ impl<'a> Transaction<'a> { { self.client.cancel_query_raw(stream, tls) } + + /// Like `Client::transaction`. + pub async fn transaction(&mut self) -> Result, Error> { + let depth = self.depth + 1; + let query = format!("SAVEPOINT sp{}", depth); + self.batch_execute(&query).await?; + + Ok(Transaction { + client: self.client, + depth, + done: false, + }) + } } From 3ed45434261b98c5666e3ed9cbb69223a8504b25 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 4 Aug 2019 19:21:32 -0700 Subject: [PATCH 223/819] Don't block the reactor on DNS --- postgres-native-tls/src/lib.rs | 8 +++++--- postgres-native-tls/src/test.rs | 21 +++++++++++--------- postgres/src/config.rs | 8 ++++++-- tokio-postgres/src/connect_socket.rs | 29 +++++++++++++++++++++++----- 4 files changed, 47 insertions(+), 19 deletions(-) diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index 32809f59c..0e9ac30c7 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -49,13 +49,13 @@ #![warn(rust_2018_idioms, clippy::all, missing_docs)] #![feature(async_await)] +use std::future::Future; +use std::pin::Pin; use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; use tokio_tls::TlsStream; -use std::pin::Pin; -use std::future::Future; #[cfg(test)] mod test; @@ -111,7 +111,9 @@ where { type Stream = TlsStream; type Error = native_tls::Error; - type Future = Pin, ChannelBinding), native_tls::Error>> + Send>>; + type Future = Pin< + Box, ChannelBinding), native_tls::Error>> + Send>, + >; fn connect(self, stream: S) -> Self::Future { let future = async move { diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 81f93398f..5e9dac58a 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -1,16 +1,16 @@ +use futures::{FutureExt, TryStreamExt}; use native_tls::{self, Certificate}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; -use futures::{FutureExt, TryStreamExt}; #[cfg(feature = "runtime")] use crate::MakeTlsConnector; use crate::TlsConnector; async fn smoke_test(s: &str, tls: T) - where - T: TlsConnect, - T::Stream: 'static + Send, +where + T: TlsConnect, + T::Stream: 'static + Send, { let stream = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) .await @@ -44,7 +44,8 @@ async fn require() { smoke_test( "user=ssl_user dbname=postgres sslmode=require", TlsConnector::new(connector, "localhost"), - ).await; + ) + .await; } #[tokio::test] @@ -58,7 +59,8 @@ async fn prefer() { smoke_test( "user=ssl_user dbname=postgres", TlsConnector::new(connector, "localhost"), - ).await; + ) 
+ .await; } #[tokio::test] @@ -72,7 +74,8 @@ async fn scram_user() { smoke_test( "user=scram_user password=password dbname=postgres sslmode=require", TlsConnector::new(connector, "localhost"), - ).await; + ) + .await; } #[tokio::test] @@ -90,8 +93,8 @@ async fn runtime() { "host=localhost port=5433 user=postgres sslmode=require", connector, ) - .await - .unwrap(); + .await + .unwrap(); let connection = connection.map(|r| r.unwrap()); tokio::spawn(connection); diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 2c2fa6554..9c44c4cb1 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -277,8 +277,12 @@ impl Config { }); match &self.executor { Some(executor) => { - executor.lock().unwrap().spawn(Box::pin(connection)).unwrap(); - }, + executor + .lock() + .unwrap() + .spawn(Box::pin(connection)) + .unwrap(); + } None => { RUNTIME.spawn(connection); } diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 3209b139e..5c7d7271e 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -1,9 +1,12 @@ use crate::config::Host; use crate::{Error, Socket}; +use std::vec; +use futures::channel::oneshot; +use futures::future; use std::future::Future; -use std::io; use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; use std::time::Duration; +use std::{io, thread}; use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; @@ -23,10 +26,7 @@ pub(crate) async fn connect_socket( // avoid dealing with blocking DNS entirely if possible vec![SocketAddr::new(ip, port)].into_iter() } - Err(_) => { - // FIXME what do? - (&**host, port).to_socket_addrs().map_err(Error::connect)? - } + Err(_) => dns(host, port).await.map_err(Error::connect)?, }; let mut error = None; @@ -64,6 +64,25 @@ pub(crate) async fn connect_socket( } } +async fn dns(host: &str, port: u16) -> io::Result> { + // if we're running on a threadpool, use its blocking support + if let Ok(r) = + future::poll_fn(|_| tokio_threadpool::blocking(|| (host, port).to_socket_addrs())).await + { + return r; + } + + // FIXME what should we do here? 
+ let (tx, rx) = oneshot::channel(); + let host = host.to_string(); + thread::spawn(move || { + let addrs = (&*host, port).to_socket_addrs(); + let _ = tx.send(addrs); + }); + + rx.await.unwrap() +} + async fn connect_with_timeout(connect: F, timeout: Option) -> Result where F: Future>, From 92e3d013eb0bd1eb5988ee4c58291402aff56c1c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 10 Aug 2019 20:25:12 -0700 Subject: [PATCH 224/819] Update to released tokio --- .circleci/config.yml | 8 ++++---- postgres-native-tls/Cargo.toml | 10 +++++----- postgres-openssl/Cargo.toml | 8 ++++---- postgres/Cargo.toml | 10 +++++----- tokio-postgres/Cargo.toml | 10 +++++----- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3acad7821..875ab4ffb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,20 +22,20 @@ version: 2 jobs: build: docker: - - image: rust:1.35.0 + - image: rustlang/rust:nightly environment: RUSTFLAGS: -D warnings - image: sfackler/rust-postgres-test:5 steps: - checkout - - run: rustup component add rustfmt clippy +# - run: rustup component add rustfmt clippy - *RESTORE_REGISTRY - run: cargo generate-lockfile - *SAVE_REGISTRY - run: rustc --version > ~/rust-version - *RESTORE_DEPS - - run: cargo fmt --all -- --check - - run: cargo clippy --all --all-targets --all-features +# - run: cargo fmt --all -- --check +# - run: cargo clippy --all --all-targets --all-features - run: cargo test --all - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index a204d54c4..59fde802a 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures-preview = "0.3.0-alpha.17" +futures-preview = "=0.3.0-alpha.17" native-tls = "0.2" -tokio-io = { git = "https://github.com/tokio-rs/tokio" } -tokio-tls = { git = "https://github.com/tokio-rs/tokio" } +tokio-io = "=0.2.0-alpha.1" +tokio-tls = "=0.3.0-alpha.1" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { git = "https://github.com/tokio-rs/tokio" } -#postgres = { version = "0.16.0-rc.1", path = "../postgres" } +tokio = "=0.2.0-alpha.1" +postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 4c743152f..6aedce285 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures-preview = "0.3.0-alpha.17" +futures-preview = "=0.3.0-alpha.17" openssl = "0.10" -tokio-io = { git = "https://github.com/tokio-rs/tokio" } +tokio-io = "=0.2.0-alpha.1" tokio-openssl = { git = "https://github.com/sfackler/tokio-openssl", branch = "tokio-02" } tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { git = "https://github.com/tokio-rs/tokio" } -#postgres = { version = "0.16.0-rc.1", path = "../postgres" } +tokio = "=0.2.0-alpha.1" +postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 3e24504fd..bc47ae829 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -30,14 +30,14 @@ runtime = 
["tokio-postgres/runtime", "tokio", "lazy_static", "log"] [dependencies] bytes = "0.4" fallible-iterator = "0.2" -futures-preview = "0.3.0-alpha.17" -pin-utils = "0.1.0-alpha.4" +futures-preview = "=0.3.0-alpha.17" +pin-utils = "=0.1.0-alpha.4" tokio-postgres = { version = "0.4.0-rc.2", path = "../tokio-postgres", default-features = false } -tokio-executor = { git = "https://github.com/tokio-rs/tokio" } +tokio-executor = "=0.2.0-alpha.1" -tokio = { git = "https://github.com/tokio-rs/tokio", optional = true } +tokio = { version = "=0.2.0-alpha.1", optional = true } lazy_static = { version = "1.0", optional = true } log = { version = "0.4", optional = true } [dev-dependencies] -tokio = { git = "https://github.com/tokio-rs/tokio" } +tokio = "=0.2.0-alpha.1" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 5b8b99bee..02e4d5caf 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -33,16 +33,16 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] [dependencies] bytes = "0.4" fallible-iterator = "0.2" -futures-preview = { version = "0.3.0-alpha.17", features = ["nightly", "async-await"] } +futures-preview = { version = "=0.3.0-alpha.17", features = ["nightly", "async-await"] } log = "0.4" parking_lot = "0.9" percent-encoding = "1.0" -pin-utils = "0.1.0-alpha.4" +pin-utils = "=0.1.0-alpha.4" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } -tokio = { git = "https://github.com/tokio-rs/tokio", default-features = false, features = ["io", "codec"] } +tokio = { version = "=0.2.0-alpha.1", default-features = false, features = ["io", "codec"] } -tokio-threadpool = { git = "https://github.com/tokio-rs/tokio", optional = true } +tokio-threadpool = { version = "=0.2.0-alpha.1", optional = true } lazy_static = { version = "1.0", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } @@ -54,5 +54,5 @@ serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } [dev-dependencies] -tokio = { git = "https://github.com/tokio-rs/tokio" } +tokio = "=0.2.0-alpha.1" env_logger = "0.5" From 1b5d65d4620cfb4fe2800fb01df638de119568b4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 12 Aug 2019 09:22:32 -0700 Subject: [PATCH 225/819] Use a released tokio-openssl --- postgres-openssl/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 6aedce285..21198e9dd 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -19,7 +19,7 @@ runtime = ["tokio-postgres/runtime"] futures-preview = "=0.3.0-alpha.17" openssl = "0.10" tokio-io = "=0.2.0-alpha.1" -tokio-openssl = { git = "https://github.com/sfackler/tokio-openssl", branch = "tokio-02" } +tokio-openssl = "=0.4.0-alpha.1" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] From aa44090fadf7d84ca9e4e55bf89676bc47f32736 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 18 Aug 2019 09:02:21 -0400 Subject: [PATCH 226/819] Update futures-preview/tokio --- postgres-native-tls/Cargo.toml | 8 ++++---- postgres-openssl/Cargo.toml | 8 ++++---- postgres/Cargo.toml | 8 ++++---- tokio-postgres/Cargo.toml | 10 +++++----- tokio-postgres/src/connect_socket.rs | 5 +++-- 5 files changed, 20 insertions(+), 19 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 
59fde802a..fdea8beab 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures-preview = "=0.3.0-alpha.17" +futures-preview = "=0.3.0-alpha.18" native-tls = "0.2" -tokio-io = "=0.2.0-alpha.1" -tokio-tls = "=0.3.0-alpha.1" +tokio-io = "=0.2.0-alpha.2" +tokio-tls = "=0.3.0-alpha.2" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.1" +tokio = "=0.2.0-alpha.2" postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 21198e9dd..a44e63ecc 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures-preview = "=0.3.0-alpha.17" +futures-preview = "=0.3.0-alpha.18" openssl = "0.10" -tokio-io = "=0.2.0-alpha.1" -tokio-openssl = "=0.4.0-alpha.1" +tokio-io = "=0.2.0-alpha.2" +tokio-openssl = "=0.4.0-alpha.2" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.1" +tokio = "=0.2.0-alpha.2" postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index bc47ae829..fd5b2e835 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -30,14 +30,14 @@ runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] [dependencies] bytes = "0.4" fallible-iterator = "0.2" -futures-preview = "=0.3.0-alpha.17" +futures-preview = "=0.3.0-alpha.18" pin-utils = "=0.1.0-alpha.4" tokio-postgres = { version = "0.4.0-rc.2", path = "../tokio-postgres", default-features = false } -tokio-executor = "=0.2.0-alpha.1" +tokio-executor = "=0.2.0-alpha.2" -tokio = { version = "=0.2.0-alpha.1", optional = true } +tokio = { version = "=0.2.0-alpha.2", optional = true } lazy_static = { version = "1.0", optional = true } log = { version = "0.4", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.1" +tokio = "=0.2.0-alpha.2" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 02e4d5caf..92790e96c 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -21,7 +21,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio/rt-full", "tokio/tcp", "tokio/uds", "tokio-threadpool", "lazy_static"] +runtime = ["tokio/rt-full", "tokio/tcp", "tokio/uds", "tokio-executor", "lazy_static"] "with-bit-vec-0_5" = ["bit-vec-05"] "with-chrono-0_4" = ["chrono-04"] @@ -33,16 +33,16 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] [dependencies] bytes = "0.4" fallible-iterator = "0.2" -futures-preview = { version = "=0.3.0-alpha.17", features = ["nightly", "async-await"] } +futures-preview = { version = "=0.3.0-alpha.18", features = ["nightly", "async-await"] } log = "0.4" parking_lot = "0.9" percent-encoding = "1.0" pin-utils = "=0.1.0-alpha.4" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } -tokio = { version = "=0.2.0-alpha.1", default-features = false, features = ["io", "codec"] } +tokio = { version = "=0.2.0-alpha.2", default-features = false, features = ["io", "codec"] } -tokio-threadpool = { version = "=0.2.0-alpha.1", optional = true } +tokio-executor = { version = "=0.2.0-alpha.2", optional = true } lazy_static = { version = "1.0", optional = true } 
bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } @@ -54,5 +54,5 @@ serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.1" +tokio = "=0.2.0-alpha.2" env_logger = "0.5" diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 5c7d7271e..bcb7cfe03 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -1,16 +1,17 @@ use crate::config::Host; use crate::{Error, Socket}; -use std::vec; use futures::channel::oneshot; use futures::future; use std::future::Future; use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; use std::time::Duration; +use std::vec; use std::{io, thread}; use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; use tokio::timer::Timeout; +use tokio_executor::threadpool; pub(crate) async fn connect_socket( host: &Host, @@ -67,7 +68,7 @@ pub(crate) async fn connect_socket( async fn dns(host: &str, port: u16) -> io::Result> { // if we're running on a threadpool, use its blocking support if let Ok(r) = - future::poll_fn(|_| tokio_threadpool::blocking(|| (host, port).to_socket_addrs())).await + future::poll_fn(|_| threadpool::blocking(|| (host, port).to_socket_addrs())).await { return r; } From c02664482043cb85bae2107d8e9139d3e5e86d75 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 21 Aug 2019 07:48:36 -0400 Subject: [PATCH 227/819] async_await is stable on nightly now --- postgres-native-tls/src/lib.rs | 1 - postgres-openssl/src/lib.rs | 1 - postgres/src/lib.rs | 1 - tokio-postgres/src/lib.rs | 1 - tokio-postgres/tests/test/main.rs | 1 - 5 files changed, 5 deletions(-) diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index 0e9ac30c7..7b45e7f69 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -47,7 +47,6 @@ //! ``` #![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.2.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -#![feature(async_await)] use std::future::Future; use std::pin::Pin; diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index bf210c269..94b0fe50f 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -41,7 +41,6 @@ //! ``` #![doc(html_root_url = "https://docs.rs/postgres-openssl/0.2.0-rc.1")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -#![feature(async_await)] #[cfg(feature = "runtime")] use openssl::error::ErrorStack; diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 3b5ee03e8..901d3c4eb 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -54,7 +54,6 @@ //! crates, respectively. #![doc(html_root_url = "https://docs.rs/postgres/0.16.0-rc.2")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] -#![feature(async_await)] #[cfg(feature = "runtime")] use lazy_static::lazy_static; diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 0c1599026..b0cfb30d8 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -106,7 +106,6 @@ //! crates, respectively. 
#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.4.0-rc.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -#![feature(async_await)] pub use crate::client::Client; pub use crate::config::Config; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 234e57a26..9994393b8 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,5 +1,4 @@ #![warn(rust_2018_idioms)] -#![feature(async_await)] use futures::channel::mpsc; use futures::{future, stream, StreamExt}; From 2a2b76d1b8ce63c3decd7c02b07cb799ebacbc95 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 29 Aug 2019 18:10:58 -0700 Subject: [PATCH 228/819] Upgrade to tokio alpha.4 --- Cargo.toml | 2 +- postgres-native-tls/Cargo.toml | 6 +-- postgres-openssl/Cargo.toml | 6 +-- postgres-openssl/src/test.rs | 4 +- postgres/Cargo.toml | 6 +-- tokio-postgres/Cargo.toml | 6 +-- tokio-postgres/src/connect_socket.rs | 66 ++++------------------------ tokio-postgres/tests/test/main.rs | 12 ++--- tokio-postgres/tests/test/runtime.rs | 4 +- 9 files changed, 29 insertions(+), 83 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cf7f99ca2..015745ef2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ members = [ "codegen", "postgres", - "postgres-native-tls", +# "postgres-native-tls", "postgres-openssl", "postgres-protocol", "tokio-postgres", diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index fdea8beab..480230391 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -18,10 +18,10 @@ runtime = ["tokio-postgres/runtime"] [dependencies] futures-preview = "=0.3.0-alpha.18" native-tls = "0.2" -tokio-io = "=0.2.0-alpha.2" -tokio-tls = "=0.3.0-alpha.2" +tokio-io = "=0.2.0-alpha.4" +tokio-tls = "=0.3.0-alpha.4" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.2" +tokio = "=0.2.0-alpha.4" postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index a44e63ecc..ed1aa6c13 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -18,10 +18,10 @@ runtime = ["tokio-postgres/runtime"] [dependencies] futures-preview = "=0.3.0-alpha.18" openssl = "0.10" -tokio-io = "=0.2.0-alpha.2" -tokio-openssl = "=0.4.0-alpha.2" +tokio-io = "=0.2.0-alpha.4" +tokio-openssl = "=0.4.0-alpha.4" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.2" +tokio = "=0.2.0-alpha.4" postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index 927651c69..df9054a06 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -10,9 +10,7 @@ where T: TlsConnect, T::Stream: 'static + Send, { - let stream = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - .await - .unwrap(); + let stream = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let builder = s.parse::().unwrap(); let (mut client, connection) = builder.connect_raw(stream, tls).await.unwrap(); diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index fd5b2e835..38110b322 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -33,11 +33,11 @@ fallible-iterator = "0.2" futures-preview = "=0.3.0-alpha.18" pin-utils = "=0.1.0-alpha.4" tokio-postgres = { version = "0.4.0-rc.2", path = 
"../tokio-postgres", default-features = false } -tokio-executor = "=0.2.0-alpha.2" +tokio-executor = "=0.2.0-alpha.4" -tokio = { version = "=0.2.0-alpha.2", optional = true } +tokio = { version = "=0.2.0-alpha.4", optional = true } lazy_static = { version = "1.0", optional = true } log = { version = "0.4", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.2" +tokio = "=0.2.0-alpha.4" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 92790e96c..36b84abe6 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -40,9 +40,9 @@ percent-encoding = "1.0" pin-utils = "=0.1.0-alpha.4" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } -tokio = { version = "=0.2.0-alpha.2", default-features = false, features = ["io", "codec"] } +tokio = { version = "=0.2.0-alpha.4", default-features = false, features = ["io", "codec"] } -tokio-executor = { version = "=0.2.0-alpha.2", optional = true } +tokio-executor = { version = "=0.2.0-alpha.4", optional = true } lazy_static = { version = "1.0", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } @@ -54,5 +54,5 @@ serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.2" +tokio = "=0.2.0-alpha.4" env_logger = "0.5" diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index bcb7cfe03..e87fa0b37 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -1,17 +1,12 @@ use crate::config::Host; use crate::{Error, Socket}; -use futures::channel::oneshot; -use futures::future; use std::future::Future; -use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; +use std::io; use std::time::Duration; -use std::vec; -use std::{io, thread}; use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; use tokio::timer::Timeout; -use tokio_executor::threadpool; pub(crate) async fn connect_socket( host: &Host, @@ -22,40 +17,16 @@ pub(crate) async fn connect_socket( ) -> Result { match host { Host::Tcp(host) => { - let addrs = match host.parse::() { - Ok(ip) => { - // avoid dealing with blocking DNS entirely if possible - vec![SocketAddr::new(ip, port)].into_iter() - } - Err(_) => dns(host, port).await.map_err(Error::connect)?, - }; - - let mut error = None; - for addr in addrs { - let new_error = - match connect_with_timeout(TcpStream::connect(&addr), connect_timeout).await { - Ok(socket) => { - socket.set_nodelay(true).map_err(Error::connect)?; - if keepalives { - socket - .set_keepalive(Some(keepalives_idle)) - .map_err(Error::connect)?; - } - - return Ok(Socket::new_tcp(socket)); - } - Err(e) => e, - }; - error = Some(new_error); + let socket = + connect_with_timeout(TcpStream::connect((&**host, port)), connect_timeout).await?; + socket.set_nodelay(true).map_err(Error::connect)?; + if keepalives { + socket + .set_keepalive(Some(keepalives_idle)) + .map_err(Error::connect)?; } - let error = error.unwrap_or_else(|| { - Error::connect(io::Error::new( - io::ErrorKind::InvalidData, - "resolved 0 addresses", - )) - }); - Err(error) + Ok(Socket::new_tcp(socket)) } #[cfg(unix)] Host::Unix(path) => { @@ -65,25 +36,6 @@ pub(crate) async fn connect_socket( } } -async fn dns(host: &str, port: u16) -> io::Result> { - // if we're running on a threadpool, use its blocking support - if let Ok(r) = - future::poll_fn(|_| threadpool::blocking(|| (host, 
port).to_socket_addrs())).await - { - return r; - } - - // FIXME what should we do here? - let (tx, rx) = oneshot::channel(); - let host = host.to_string(); - thread::spawn(move || { - let addrs = (&*host, port).to_socket_addrs(); - let _ = tx.send(addrs); - }); - - rx.await.unwrap() -} - async fn connect_with_timeout(connect: F, timeout: Option) -> Result where F: Future>, diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 9994393b8..941d3bd95 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -6,7 +6,7 @@ use futures::{join, try_join, FutureExt, TryStreamExt}; use std::fmt::Write; use std::time::{Duration, Instant}; use tokio::net::TcpStream; -use tokio::timer::Delay; +use tokio::timer; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::types::{Kind, Type}; @@ -18,9 +18,7 @@ mod runtime; mod types; async fn connect_raw(s: &str) -> Result<(Client, Connection), Error> { - let socket = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - .await - .unwrap(); + let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let config = s.parse::().unwrap(); config.connect_raw(socket, NoTls).await } @@ -303,11 +301,9 @@ async fn simple_query() { async fn cancel_query_raw() { let mut client = connect("user=postgres").await; - let socket = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) - .await - .unwrap(); + let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let cancel = client.cancel_query_raw(socket, NoTls); - let cancel = Delay::new(Instant::now() + Duration::from_millis(100)).then(|()| cancel); + let cancel = timer::delay(Instant::now() + Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index f7d641a26..2d3f233e0 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,6 +1,6 @@ use futures::{join, FutureExt, TryStreamExt}; use std::time::{Duration, Instant}; -use tokio::timer::Delay; +use tokio::timer; use tokio_postgres::error::SqlState; use tokio_postgres::{Client, NoTls}; @@ -75,7 +75,7 @@ async fn cancel_query() { let mut client = connect("host=localhost port=5433 user=postgres").await; let cancel = client.cancel_query(NoTls); - let cancel = Delay::new(Instant::now() + Duration::from_millis(100)).then(|()| cancel); + let cancel = timer::delay(Instant::now() + Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); From c456d2c09f4d242ab0325600f256c11ba1f8e885 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 30 Aug 2019 14:15:03 -0700 Subject: [PATCH 229/819] Reenable postgres-native-tls --- Cargo.toml | 2 +- postgres-native-tls/src/test.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 015745ef2..cf7f99ca2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ members = [ "codegen", "postgres", -# "postgres-native-tls", + "postgres-native-tls", "postgres-openssl", "postgres-protocol", "tokio-postgres", diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 5e9dac58a..d4942fe8e 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -12,7 +12,7 @@ where T: TlsConnect, T::Stream: 'static + Send, { - let stream = TcpStream::connect(&"127.0.0.1:5433".parse().unwrap()) + let stream 
= TcpStream::connect("127.0.0.1:5433") .await .unwrap(); From 24cf9291e1d5a9a73ae6ae228f7a431b00890a2b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 3 Sep 2019 18:01:41 -0700 Subject: [PATCH 230/819] Remove unneeded Sync bounds --- postgres/src/config.rs | 4 ++-- tokio-postgres/tests/test/runtime.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b04c03f87..8463d690f 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -19,7 +19,7 @@ use tokio_postgres::config::{SslMode, TargetSessionAttrs}; use crate::{Client, RUNTIME}; -type DynExecutor = dyn Executor + Send>> + Sync + Send; +type DynExecutor = dyn Executor + Send>> + Send; /// Connection configuration. /// @@ -242,7 +242,7 @@ impl Config { /// Defaults to a postgres-specific tokio `Runtime`. pub fn executor(&mut self, executor: E) -> &mut Config where - E: Executor + Send>> + 'static + Sync + Send, + E: Executor + Send>> + 'static + Send, { self.executor = Some(Arc::new(executor)); self diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 2af9a18d7..f959a16ef 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -57,7 +57,7 @@ fn target_session_attrs_ok() { "host=localhost port=5433 user=postgres target_session_attrs=read-write", NoTls, ); - runtime.block_on(f).unwrap(); + let _ = runtime.block_on(f).unwrap(); } #[test] From ac8d7077d31032ab2dfc4617fe2517329d48f232 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 3 Sep 2019 18:05:19 -0700 Subject: [PATCH 231/819] Remove uneeded Sync bounds --- postgres/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 9c44c4cb1..4a2d45097 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -95,7 +95,7 @@ use crate::{Client, RUNTIME}; pub struct Config { config: tokio_postgres::Config, // this is an option since we don't want to boot up our default runtime unless we're actually going to use it. - executor: Option>>, + executor: Option>>, } impl fmt::Debug for Config { @@ -239,7 +239,7 @@ impl Config { /// Defaults to a postgres-specific tokio `Runtime`. pub fn executor(&mut self, executor: E) -> &mut Config where - E: Executor + 'static + Sync + Send, + E: Executor + 'static + Send, { self.executor = Some(Arc::new(Mutex::new(executor))); self From 98fb1173f54abb893ab5dced57e305f1fec29931 Mon Sep 17 00:00:00 2001 From: Jeb Rosen Date: Tue, 3 Sep 2019 18:44:30 -0700 Subject: [PATCH 232/819] Make prepare() return a Future that satisfies Send. 
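A boxed trait object only has the auto traits that are written into its type, so a plain `Pin<Box<dyn Future<Output = T>>>` is not `Send` even when the future inside it is. A minimal sketch of the distinction (the function names here are illustrative, not taken from this patch):

    use std::future::Future;
    use std::pin::Pin;

    // The `Send`-ness of `fut` is erased from the type: this value cannot be
    // handed to a multithreaded executor such as `tokio::spawn`.
    fn boxed(fut: impl Future<Output = i32> + Send + 'static) -> Pin<Box<dyn Future<Output = i32>>> {
        Box::pin(fut)
    }

    // `+ Send` is kept in the trait-object type, so the boxed future stays spawnable.
    fn boxed_send(fut: impl Future<Output = i32> + Send + 'static) -> Pin<Box<dyn Future<Output = i32> + Send>> {
        Box::pin(fut)
    }

This is why the recursive helper in the diff below boxes its future with `+ Send` in the trait-object type rather than leaving the bound off.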
--- tokio-postgres/src/prepare.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 851673cad..4ad890274 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -126,8 +126,7 @@ async fn get_type(client: &Arc, oid: Oid) -> Result { let stmt = typeinfo_statement(client).await?; - let params: &[&dyn ToSql] = &[&oid]; - let buf = query::encode(&stmt, params.iter().cloned()); + let buf = query::encode(&stmt, (&[&oid as &dyn ToSql]).iter().cloned()); let rows = query::query(client.clone(), stmt, buf); pin_mut!(rows); @@ -174,7 +173,7 @@ async fn get_type(client: &Arc, oid: Oid) -> Result { fn get_type_rec<'a>( client: &'a Arc, oid: Oid, -) -> Pin> + 'a>> { +) -> Pin> + Send + 'a>> { Box::pin(get_type(client, oid)) } @@ -198,8 +197,7 @@ async fn typeinfo_statement(client: &Arc) -> Result, oid: Oid) -> Result, Error> { let stmt = typeinfo_enum_statement(client).await?; - let params: &[&dyn ToSql] = &[&oid]; - let buf = query::encode(&stmt, params.iter().cloned()); + let buf = query::encode(&stmt, (&[&oid as &dyn ToSql]).iter().cloned()); query::query(client.clone(), stmt, buf) .and_then(|row| future::ready(row.try_get(0))) .try_collect() @@ -226,8 +224,7 @@ async fn typeinfo_enum_statement(client: &Arc) -> Result, oid: Oid) -> Result, Error> { let stmt = typeinfo_composite_statement(client).await?; - let params: &[&dyn ToSql] = &[&oid]; - let buf = query::encode(&stmt, params.iter().cloned()); + let buf = query::encode(&stmt, (&[&oid as &dyn ToSql]).iter().cloned()); let rows = query::query(client.clone(), stmt, buf) .try_collect::>() .await?; From 42cf85516843d9d188ab400d074548a1bc127415 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 6 Sep 2019 04:37:26 -0700 Subject: [PATCH 233/819] Include the Rust type name in WrongType --- tokio-postgres/src/row.rs | 5 ++++- tokio-postgres/src/types/mod.rs | 19 +++++++++++++------ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 326733f84..825f4ee35 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -164,7 +164,10 @@ impl Row { let ty = self.columns()[idx].type_(); if !T::accepts(ty) { - return Err(Error::from_sql(Box::new(WrongType::new(ty.clone())), idx)); + return Err(Error::from_sql( + Box::new(WrongType::new::(ty.clone())), + idx, + )); } let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index baa98a8a2..a5f7604c0 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -68,7 +68,7 @@ where T: ToSql, { if !T::accepts(ty) { - return Err(Box::new(WrongType(ty.clone()))); + return Err(Box::new(WrongType::new::(ty.clone()))); } v.to_sql(ty, out) } @@ -91,6 +91,7 @@ mod type_gen; #[cfg(feature = "with-serde_json-1")] pub use crate::types::serde_json_1::Json; +use std::any::type_name; /// A Postgres type. #[derive(PartialEq, Eq, Clone, Debug)] @@ -206,14 +207,17 @@ impl Error for WasNull {} /// An error indicating that a conversion was attempted between incompatible /// Rust and Postgres types. 
#[derive(Debug)] -pub struct WrongType(Type); +pub struct WrongType { + postgres: Type, + rust: &'static str, +} impl fmt::Display for WrongType { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!( fmt, - "cannot convert to or from a Postgres value of type `{}`", - self.0 + "cannot convert between the Rust type `{}` and the Postgres type `{}`", + self.rust, self.postgres, ) } } @@ -221,8 +225,11 @@ impl fmt::Display for WrongType { impl Error for WrongType {} impl WrongType { - pub(crate) fn new(ty: Type) -> WrongType { - WrongType(ty) + pub(crate) fn new(ty: Type) -> WrongType { + WrongType { + postgres: ty, + rust: type_name::(), + } } } From 847015c655e6d4c1b34e2507b8eff8933abc6aba Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 8 Sep 2019 16:54:10 -0700 Subject: [PATCH 234/819] Fix link Closes #481 --- tokio-postgres/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 1aaf82020..2379a2611 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -157,7 +157,7 @@ fn next_portal() -> String { /// /// Requires the `runtime` Cargo feature (enabled by default). /// -/// [`Config`]: ./Config.t.html +/// [`Config`]: config/struct.Config.html #[cfg(feature = "runtime")] pub fn connect(config: &str, tls: T) -> impls::Connect where From 08a163c546b831ed25e461715cedac6f4940a680 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 8 Sep 2019 16:54:57 -0700 Subject: [PATCH 235/819] Fix doc link --- tokio-postgres/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index b0cfb30d8..98c245fd3 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -158,7 +158,7 @@ pub mod types; /// /// Requires the `runtime` Cargo feature (enabled by default). /// -/// [`Config`]: ./Config.t.html +/// [`Config`]: config/struct.Config.html #[cfg(feature = "runtime")] pub async fn connect( config: &str, From 2cc5bbf21b654c26bfd8b1e80b1e5c7cbd81235f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 8 Sep 2019 16:56:31 -0700 Subject: [PATCH 236/819] Inline buffer methods --- postgres-protocol/src/message/backend.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 909f8bfd3..0cf1c4f0c 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -285,14 +285,17 @@ struct Buffer { } impl Buffer { + #[inline] fn slice(&self) -> &[u8] { &self.bytes[self.idx..] 
} + #[inline] fn is_empty(&self) -> bool { self.slice().is_empty() } + #[inline] fn read_cstr(&mut self) -> io::Result { match memchr(0, self.slice()) { Some(pos) => { @@ -309,6 +312,7 @@ impl Buffer { } } + #[inline] fn read_all(&mut self) -> Bytes { let buf = self.bytes.slice_from(self.idx); self.idx = self.bytes.len(); @@ -317,6 +321,7 @@ impl Buffer { } impl Read for Buffer { + #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result { let len = { let slice = self.slice(); From b7fe6bece5dd11ffc04d8178d5105f9e1a354ebd Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 11 Sep 2019 22:19:54 -0400 Subject: [PATCH 237/819] Update to newest nightly --- tokio-postgres/src/lib.rs | 5 ++++- tokio-postgres/src/prepare.rs | 16 ++++++++++------ tokio-postgres/tests/test/main.rs | 3 ++- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 98c245fd3..29f378a6c 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -168,7 +168,10 @@ where T: MakeTlsConnect, { let config = config.parse::()?; - config.connect(tls).await + // FIXME https://github.com/rust-lang/rust/issues/64391 + async move { + config.connect(tls).await + }.await } /// An asynchronous notification. diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 4ad890274..7db3a5b18 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -106,6 +106,10 @@ pub fn prepare( } } +fn prepare_rec(client: Arc, query: &str, types: &[Type]) -> Pin> + 'static + Send>> { + Box::pin(prepare(client, query, types)) +} + fn encode(name: &str, query: &str, types: &[Type]) -> Result, Error> { let mut buf = vec![]; frontend::parse(name, query, types.iter().map(Type::oid), &mut buf).map_err(Error::encode)?; @@ -182,10 +186,10 @@ async fn typeinfo_statement(client: &Arc) -> Result stmt, Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => { - Box::pin(prepare(client.clone(), TYPEINFO_FALLBACK_QUERY, &[])).await? + prepare_rec(client.clone(), TYPEINFO_FALLBACK_QUERY, &[]).await? } Err(e) => return Err(e), }; @@ -209,10 +213,10 @@ async fn typeinfo_enum_statement(client: &Arc) -> Result stmt, Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => { - Box::pin(prepare(client.clone(), TYPEINFO_ENUM_FALLBACK_QUERY, &[])).await? + prepare_rec(client.clone(), TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await? 
} Err(e) => return Err(e), }; @@ -233,7 +237,7 @@ async fn get_composite_fields(client: &Arc, oid: Oid) -> Result) -> Result Result<(Client, Connection), Error> { let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let config = s.parse::().unwrap(); - config.connect_raw(socket, NoTls).await + // FIXME https://github.com/rust-lang/rust/issues/64391 + async move { config.connect_raw(socket, NoTls).await }.await } async fn connect(s: &str) -> Client { From 5283ad4a0821595371b2aa2f0846c1c8e9ee3946 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 19 Sep 2019 14:45:15 -0700 Subject: [PATCH 238/819] Clean up workaround --- tokio-postgres/src/lib.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 29f378a6c..785709073 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -168,10 +168,7 @@ where T: MakeTlsConnect, { let config = config.parse::()?; - // FIXME https://github.com/rust-lang/rust/issues/64391 - async move { - config.connect(tls).await - }.await + return config.connect(tls).await; } /// An asynchronous notification. From 1fa48387174d97dbd035006777d5178f72d1115d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 19 Sep 2019 14:48:34 -0700 Subject: [PATCH 239/819] Upgrade to tokio alpha.5 --- postgres-native-tls/Cargo.toml | 6 +++--- postgres-openssl/Cargo.toml | 6 +++--- postgres/Cargo.toml | 6 +++--- tokio-postgres/Cargo.toml | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 480230391..7c436fbac 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -18,10 +18,10 @@ runtime = ["tokio-postgres/runtime"] [dependencies] futures-preview = "=0.3.0-alpha.18" native-tls = "0.2" -tokio-io = "=0.2.0-alpha.4" -tokio-tls = "=0.3.0-alpha.4" +tokio-io = "=0.2.0-alpha.5" +tokio-tls = "=0.3.0-alpha.5" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.4" +tokio = "=0.2.0-alpha.5" postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index ed1aa6c13..695befeb2 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -18,10 +18,10 @@ runtime = ["tokio-postgres/runtime"] [dependencies] futures-preview = "=0.3.0-alpha.18" openssl = "0.10" -tokio-io = "=0.2.0-alpha.4" -tokio-openssl = "=0.4.0-alpha.4" +tokio-io = "=0.2.0-alpha.5" +tokio-openssl = "=0.4.0-alpha.5" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.4" +tokio = "=0.2.0-alpha.5" postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 38110b322..dee0fc2c2 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -33,11 +33,11 @@ fallible-iterator = "0.2" futures-preview = "=0.3.0-alpha.18" pin-utils = "=0.1.0-alpha.4" tokio-postgres = { version = "0.4.0-rc.2", path = "../tokio-postgres", default-features = false } -tokio-executor = "=0.2.0-alpha.4" +tokio-executor = "=0.2.0-alpha.5" -tokio = { version = "=0.2.0-alpha.4", optional = true } +tokio = { version = "=0.2.0-alpha.5", optional = true } lazy_static = { version = "1.0", optional = true } log = { version = "0.4", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.4" +tokio = "=0.2.0-alpha.5" 
diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 36b84abe6..8e5184a2c 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -40,9 +40,9 @@ percent-encoding = "1.0" pin-utils = "=0.1.0-alpha.4" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } -tokio = { version = "=0.2.0-alpha.4", default-features = false, features = ["io", "codec"] } +tokio = { version = "=0.2.0-alpha.5", default-features = false, features = ["io", "codec"] } -tokio-executor = { version = "=0.2.0-alpha.4", optional = true } +tokio-executor = { version = "=0.2.0-alpha.5", optional = true } lazy_static = { version = "1.0", optional = true } bit-vec-05 = { version = "0.5", package = "bit-vec", optional = true } @@ -54,5 +54,5 @@ serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.4" +tokio = "=0.2.0-alpha.5" env_logger = "0.5" From 1fcf026af67db8853b9875fd47dc620cc0d7c751 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Sep 2019 15:52:27 -0700 Subject: [PATCH 240/819] Remove workaround --- tokio-postgres/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 785709073..98c245fd3 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -168,7 +168,7 @@ where T: MakeTlsConnect, { let config = config.parse::()?; - return config.connect(tls).await; + config.connect(tls).await } /// An asynchronous notification. From e2d305033e1d7796fc99bea211955ba99d3448ad Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Sep 2019 15:57:58 -0700 Subject: [PATCH 241/819] Remove vec workaround in tests --- tokio-postgres/tests/test/types/bit_vec_07.rs | 4 +- tokio-postgres/tests/test/types/chrono_04.rs | 14 +++---- tokio-postgres/tests/test/types/eui48_04.rs | 2 +- tokio-postgres/tests/test/types/geo_010.rs | 6 +-- tokio-postgres/tests/test/types/mod.rs | 38 +++++++++---------- .../tests/test/types/serde_json_1.rs | 4 +- tokio-postgres/tests/test/types/uuid_07.rs | 2 +- 7 files changed, 35 insertions(+), 35 deletions(-) diff --git a/tokio-postgres/tests/test/types/bit_vec_07.rs b/tokio-postgres/tests/test/types/bit_vec_07.rs index 4c0e426fb..5ea4a00bb 100644 --- a/tokio-postgres/tests/test/types/bit_vec_07.rs +++ b/tokio-postgres/tests/test/types/bit_vec_07.rs @@ -9,7 +9,7 @@ async fn test_bit_params() { bv.pop(); test_type( "BIT(14)", - vec![(Some(bv), "B'01101001000001'"), (None, "NULL")], + &[(Some(bv), "B'01101001000001'"), (None, "NULL")], ) .await } @@ -21,7 +21,7 @@ async fn test_varbit_params() { bv.pop(); test_type( "VARBIT", - vec![ + &[ (Some(bv), "B'01101001000001'"), (Some(BitVec::from_bytes(&[])), "B''"), (None, "NULL"), diff --git a/tokio-postgres/tests/test/types/chrono_04.rs b/tokio-postgres/tests/test/types/chrono_04.rs index a48346bc4..13c8dc14f 100644 --- a/tokio-postgres/tests/test/types/chrono_04.rs +++ b/tokio-postgres/tests/test/types/chrono_04.rs @@ -13,7 +13,7 @@ async fn test_naive_date_time_params() { } test_type( "TIMESTAMP", - vec![ + &[ make_check("'1970-01-01 00:00:00.010000000'"), make_check("'1965-09-25 11:19:33.100314000'"), make_check("'2010-02-09 23:11:45.120200000'"), @@ -35,7 +35,7 @@ async fn test_with_special_naive_date_time_params() { } test_type( "TIMESTAMP", - vec![ + &[ make_check("'1970-01-01 00:00:00.010000000'"), make_check("'1965-09-25 11:19:33.100314000'"), 
make_check("'2010-02-09 23:11:45.120200000'"), @@ -59,7 +59,7 @@ async fn test_date_time_params() { } test_type( "TIMESTAMP WITH TIME ZONE", - vec![ + &[ make_check("'1970-01-01 00:00:00.010000000'"), make_check("'1965-09-25 11:19:33.100314000'"), make_check("'2010-02-09 23:11:45.120200000'"), @@ -82,7 +82,7 @@ async fn test_with_special_date_time_params() { } test_type( "TIMESTAMP WITH TIME ZONE", - vec![ + &[ make_check("'1970-01-01 00:00:00.010000000'"), make_check("'1965-09-25 11:19:33.100314000'"), make_check("'2010-02-09 23:11:45.120200000'"), @@ -103,7 +103,7 @@ async fn test_date_params() { } test_type( "DATE", - vec![ + &[ make_check("'1970-01-01'"), make_check("'1965-09-25'"), make_check("'2010-02-09'"), @@ -123,7 +123,7 @@ async fn test_with_special_date_params() { } test_type( "DATE", - vec![ + &[ make_check("'1970-01-01'"), make_check("'1965-09-25'"), make_check("'2010-02-09'"), @@ -144,7 +144,7 @@ async fn test_time_params() { } test_type( "TIME", - vec![ + &[ make_check("'00:00:00.010000000'"), make_check("'11:19:33.100314000'"), make_check("'23:11:45.120200000'"), diff --git a/tokio-postgres/tests/test/types/eui48_04.rs b/tokio-postgres/tests/test/types/eui48_04.rs index 0cfb7cb58..074faa37e 100644 --- a/tokio-postgres/tests/test/types/eui48_04.rs +++ b/tokio-postgres/tests/test/types/eui48_04.rs @@ -6,7 +6,7 @@ use crate::types::test_type; async fn test_eui48_params() { test_type( "MACADDR", - vec![ + &[ ( Some(MacAddress::parse_str("12-34-56-AB-CD-EF").unwrap()), "'12-34-56-ab-cd-ef'", diff --git a/tokio-postgres/tests/test/types/geo_010.rs b/tokio-postgres/tests/test/types/geo_010.rs index ecb1cbfcd..6e3d835b9 100644 --- a/tokio-postgres/tests/test/types/geo_010.rs +++ b/tokio-postgres/tests/test/types/geo_010.rs @@ -6,7 +6,7 @@ use crate::types::test_type; async fn test_point_params() { test_type( "POINT", - vec![ + &[ (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), (Some(Point::new(-3.14, 1.618)), "POINT(-3.14, 1.618)"), (None, "NULL"), @@ -19,7 +19,7 @@ async fn test_point_params() { async fn test_box_params() { test_type( "BOX", - vec![ + &[ ( Some(Rect { min: Coordinate { x: -3.14, y: 1.618 }, @@ -48,7 +48,7 @@ async fn test_path_params() { ]; test_type( "PATH", - vec![ + &[ ( Some(LineString(points)), "path '((0, 0), (-3.14, 1.618), (160.0, 69701.5615))'", diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 46d451444..89cce955a 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -25,7 +25,7 @@ mod serde_json_1; #[cfg(feature = "with-uuid-0_7")] mod uuid_07; -async fn test_type(sql_type: &str, checks: Vec<(T, S)>) +async fn test_type(sql_type: &str, checks: &[(T, S)]) where T: PartialEq + for<'a> FromSqlOwned + ToSql, S: fmt::Display, @@ -43,7 +43,7 @@ where .await .unwrap(); let result = rows[0].get(0); - assert_eq!(val, result); + assert_eq!(val, &result); let stmt = client .prepare(&format!("SELECT $1::{}", sql_type)) @@ -55,7 +55,7 @@ where .await .unwrap(); let result = rows[0].get(0); - assert_eq!(val, result); + assert_eq!(val, &result); } } @@ -63,21 +63,21 @@ where async fn test_bool_params() { test_type( "BOOL", - vec![(Some(true), "'t'"), (Some(false), "'f'"), (None, "NULL")], + &[(Some(true), "'t'"), (Some(false), "'f'"), (None, "NULL")], ) .await; } #[tokio::test] async fn test_i8_params() { - test_type("\"char\"", vec![(Some('a' as i8), "'a'"), (None, "NULL")]).await; + test_type("\"char\"", &[(Some('a' as i8), "'a'"), (None, "NULL")]).await; } #[tokio::test] 
async fn test_name_params() { test_type( "NAME", - vec![ + &[ (Some("hello world".to_owned()), "'hello world'"), ( Some("イロハニホヘト チリヌルヲ".to_owned()), @@ -93,7 +93,7 @@ async fn test_name_params() { async fn test_i16_params() { test_type( "SMALLINT", - vec![ + &[ (Some(15001i16), "15001"), (Some(-15001i16), "-15001"), (None, "NULL"), @@ -106,7 +106,7 @@ async fn test_i16_params() { async fn test_i32_params() { test_type( "INT", - vec![ + &[ (Some(2_147_483_548i32), "2147483548"), (Some(-2_147_483_548i32), "-2147483548"), (None, "NULL"), @@ -119,7 +119,7 @@ async fn test_i32_params() { async fn test_oid_params() { test_type( "OID", - vec![ + &[ (Some(2_147_483_548u32), "2147483548"), (Some(4_000_000_000), "4000000000"), (None, "NULL"), @@ -132,7 +132,7 @@ async fn test_oid_params() { async fn test_i64_params() { test_type( "BIGINT", - vec![ + &[ (Some(9_223_372_036_854_775_708i64), "9223372036854775708"), (Some(-9_223_372_036_854_775_708i64), "-9223372036854775708"), (None, "NULL"), @@ -145,7 +145,7 @@ async fn test_i64_params() { async fn test_f32_params() { test_type( "REAL", - vec![ + &[ (Some(f32::INFINITY), "'infinity'"), (Some(f32::NEG_INFINITY), "'-infinity'"), (Some(1000.55), "1000.55"), @@ -159,7 +159,7 @@ async fn test_f32_params() { async fn test_f64_params() { test_type( "DOUBLE PRECISION", - vec![ + &[ (Some(f64::INFINITY), "'infinity'"), (Some(f64::NEG_INFINITY), "'-infinity'"), (Some(10000.55), "10000.55"), @@ -173,7 +173,7 @@ async fn test_f64_params() { async fn test_varchar_params() { test_type( "VARCHAR", - vec![ + &[ (Some("hello world".to_owned()), "'hello world'"), ( Some("イロハニホヘト チリヌルヲ".to_owned()), @@ -189,7 +189,7 @@ async fn test_varchar_params() { async fn test_text_params() { test_type( "TEXT", - vec![ + &[ (Some("hello world".to_owned()), "'hello world'"), ( Some("イロハニホヘト チリヌルヲ".to_owned()), @@ -296,7 +296,7 @@ async fn test_citext_params() { async fn test_bytea_params() { test_type( "BYTEA", - vec![ + &[ (Some(vec![0u8, 1, 2, 3, 254, 255]), "'\\x00010203feff'"), (None, "NULL"), ], @@ -329,7 +329,7 @@ macro_rules! 
make_map { async fn test_hstore_params() { test_type( "hstore", - vec![ + &[ ( Some(make_map!("a".to_owned() => Some("1".to_owned()))), "'a=>1'", @@ -350,7 +350,7 @@ async fn test_hstore_params() { async fn test_array_params() { test_type( "integer[]", - vec![ + &[ (Some(vec![1i32, 2i32]), "ARRAY[1,2]"), (Some(vec![1i32]), "ARRAY[1]"), (Some(vec![]), "ARRAY[]"), @@ -607,7 +607,7 @@ async fn enum_() { async fn system_time() { test_type( "TIMESTAMP", - vec![ + &[ ( Some(UNIX_EPOCH + Duration::from_millis(1_010)), "'1970-01-01 00:00:01.01'", @@ -630,7 +630,7 @@ async fn system_time() { async fn inet() { test_type( "INET", - vec![ + &[ (Some("127.0.0.1".parse::().unwrap()), "'127.0.0.1'"), ( Some("127.0.0.1".parse::().unwrap()), diff --git a/tokio-postgres/tests/test/types/serde_json_1.rs b/tokio-postgres/tests/test/types/serde_json_1.rs index f48c188f2..93ec61d7b 100644 --- a/tokio-postgres/tests/test/types/serde_json_1.rs +++ b/tokio-postgres/tests/test/types/serde_json_1.rs @@ -6,7 +6,7 @@ use crate::types::test_type; async fn test_json_params() { test_type( "JSON", - vec![ + &[ ( Some(serde_json_1::from_str::("[10, 11, 12]").unwrap()), "'[10, 11, 12]'", @@ -25,7 +25,7 @@ async fn test_json_params() { async fn test_jsonb_params() { test_type( "JSONB", - vec![ + &[ ( Some(serde_json_1::from_str::("[10, 11, 12]").unwrap()), "'[10, 11, 12]'", diff --git a/tokio-postgres/tests/test/types/uuid_07.rs b/tokio-postgres/tests/test/types/uuid_07.rs index 8e60fe3a4..563cfa4cb 100644 --- a/tokio-postgres/tests/test/types/uuid_07.rs +++ b/tokio-postgres/tests/test/types/uuid_07.rs @@ -6,7 +6,7 @@ use crate::types::test_type; async fn test_uuid_params() { test_type( "UUID", - vec![ + &[ ( Some(Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap()), "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", From 6c3a4ab19208e682e150ce31b987297b9e8bb75b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 24 Sep 2019 17:03:37 -0700 Subject: [PATCH 242/819] Add channel_binding=disable/prefer/require to config Closes #487 --- postgres-native-tls/src/test.rs | 4 +- postgres-openssl/src/test.rs | 26 +++++++ postgres/src/config.rs | 10 ++- tokio-postgres/src/config.rs | 40 +++++++++++ tokio-postgres/src/connect_raw.rs | 38 ++++++++--- tokio-postgres/src/prepare.rs | 6 +- tokio-postgres/tests/test/main.rs | 109 ++++-------------------------- 7 files changed, 124 insertions(+), 109 deletions(-) diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index d4942fe8e..416a3c14d 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -12,9 +12,7 @@ where T: TlsConnect, T::Stream: 'static + Send, { - let stream = TcpStream::connect("127.0.0.1:5433") - .await - .unwrap(); + let stream = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let builder = s.parse::().unwrap(); let (mut client, connection) = builder.connect_raw(stream, tls).await.unwrap(); diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index df9054a06..e3ee454ea 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -65,6 +65,32 @@ async fn scram_user() { .await; } +#[tokio::test] +async fn require_channel_binding_err() { + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + builder.set_ca_file("../test/server.crt").unwrap(); + let ctx = builder.build(); + let connector = TlsConnector::new(ctx.configure().unwrap(), "localhost"); + + let stream = TcpStream::connect("127.0.0.1:5433").await.unwrap(); + let builder = "user=pass_user 
password=password dbname=postgres channel_binding=require" + .parse::() + .unwrap(); + builder.connect_raw(stream, connector).await.err().unwrap(); +} + +#[tokio::test] +async fn require_channel_binding_ok() { + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + builder.set_ca_file("../test/server.crt").unwrap(); + let ctx = builder.build(); + smoke_test( + "user=scram_user password=password dbname=postgres channel_binding=require", + TlsConnector::new(ctx.configure().unwrap(), "localhost"), + ) + .await; +} + #[tokio::test] #[cfg(feature = "runtime")] async fn runtime() { diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 4a2d45097..354a99ce1 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -14,7 +14,7 @@ use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; #[doc(inline)] -pub use tokio_postgres::config::{SslMode, TargetSessionAttrs}; +pub use tokio_postgres::config::{SslMode, TargetSessionAttrs, ChannelBinding}; use crate::{Client, RUNTIME}; @@ -234,6 +234,14 @@ impl Config { self } + /// Sets the channel binding behavior. + /// + /// Defaults to `prefer`. + pub fn channel_binding(&mut self, channel_binding: ChannelBinding) -> &mut Config { + self.config.channel_binding(channel_binding); + self + } + /// Sets the executor used to run the connection futures. /// /// Defaults to a postgres-specific tokio `Runtime`. diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 19df1a354..0dc6d5bf8 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -46,6 +46,19 @@ pub enum SslMode { __NonExhaustive, } +/// Channel binding configuration. +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum ChannelBinding { + /// Do not use channel binding. + Disable, + /// Attempt to use channel binding but allow sessions without. + Prefer, + /// Require the use of channel binding. + Require, + #[doc(hidden)] + __NonExhaustive, +} + #[derive(Debug, Clone, PartialEq)] pub(crate) enum Host { Tcp(String), @@ -87,6 +100,9 @@ pub(crate) enum Host { /// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that /// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server /// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. +/// * `channel_binding` - Controls usage of channel binding in the authentication process. If set to `disable`, channel +/// binding will not be used. If set to `prefer`, channel binding will be used if available, but not used otherwise. +/// If set to `require`, the authentication process will fail if channel binding is not used. Defaults to `prefer`. /// /// ## Examples /// @@ -140,6 +156,7 @@ pub struct Config { pub(crate) keepalives: bool, pub(crate) keepalives_idle: Duration, pub(crate) target_session_attrs: TargetSessionAttrs, + pub(crate) channel_binding: ChannelBinding, } impl Default for Config { @@ -164,6 +181,7 @@ impl Config { keepalives: true, keepalives_idle: Duration::from_secs(2 * 60 * 60), target_session_attrs: TargetSessionAttrs::Any, + channel_binding: ChannelBinding::Prefer, } } @@ -287,6 +305,14 @@ impl Config { self } + /// Sets the channel binding behavior. + /// + /// Defaults to `prefer`. 
+ pub fn channel_binding(&mut self, channel_binding: ChannelBinding) -> &mut Config { + self.channel_binding = channel_binding; + self + } + fn param(&mut self, key: &str, value: &str) -> Result<(), Error> { match key { "user" => { @@ -363,6 +389,19 @@ impl Config { }; self.target_session_attrs(target_session_attrs); } + "channel_binding" => { + let channel_binding = match value { + "disable" => ChannelBinding::Disable, + "prefer" => ChannelBinding::Prefer, + "require" => ChannelBinding::Require, + _ => { + return Err(Error::config_parse(Box::new(InvalidValue( + "channel_binding", + )))) + } + }; + self.channel_binding(channel_binding); + } key => { return Err(Error::config_parse(Box::new(UnknownOption( key.to_string(), @@ -434,6 +473,7 @@ impl fmt::Debug for Config { .field("keepalives", &self.keepalives) .field("keepalives_idle", &self.keepalives_idle) .field("target_session_attrs", &self.target_session_attrs) + .field("channel_binding", &self.channel_binding) .finish() } } diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 7b9fbd5e6..cf80a91c4 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -1,5 +1,5 @@ use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; -use crate::config::Config; +use crate::config::{self, Config}; use crate::connect_tls::connect_tls; use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::{ChannelBinding, TlsConnect}; @@ -141,8 +141,13 @@ where T: AsyncRead + AsyncWrite + Unpin, { match stream.try_next().await.map_err(Error::io)? { - Some(Message::AuthenticationOk) => return Ok(()), + Some(Message::AuthenticationOk) => { + no_channel_binding(config)?; + return Ok(()); + } Some(Message::AuthenticationCleartextPassword) => { + no_channel_binding(config)?; + let pass = config .password .as_ref() @@ -151,6 +156,8 @@ where authenticate_password(stream, pass).await?; } Some(Message::AuthenticationMd5Password(body)) => { + no_channel_binding(config)?; + let user = config .user .as_ref() @@ -164,12 +171,7 @@ where authenticate_password(stream, output.as_bytes()).await?; } Some(Message::AuthenticationSasl(body)) => { - let pass = config - .password - .as_ref() - .ok_or_else(|| Error::config("password missing".into()))?; - - authenticate_sasl(stream, body, channel_binding, pass).await?; + authenticate_sasl(stream, body, channel_binding, config).await?; } Some(Message::AuthenticationKerberosV5) | Some(Message::AuthenticationScmCredential) @@ -192,6 +194,16 @@ where } } +fn no_channel_binding(config: &Config) -> Result<(), Error> { + match config.channel_binding { + config::ChannelBinding::Disable | config::ChannelBinding::Prefer => Ok(()), + config::ChannelBinding::Require => Err(Error::authentication( + "server did not use channel binding".into(), + )), + config::ChannelBinding::__NonExhaustive => unreachable!(), + } +} + async fn authenticate_password( stream: &mut StartupStream, password: &[u8], @@ -213,12 +225,17 @@ async fn authenticate_sasl( stream: &mut StartupStream, body: AuthenticationSaslBody, channel_binding: ChannelBinding, - password: &[u8], + config: &Config, ) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin, { + let password = config + .password + .as_ref() + .ok_or_else(|| Error::config("password missing".into()))?; + let mut has_scram = false; let mut has_scram_plus = false; let mut mechanisms = body.mechanisms(); @@ -232,6 +249,7 @@ where let channel_binding = channel_binding 
.tls_server_end_point + .filter(|_| config.channel_binding != config::ChannelBinding::Disable) .map(sasl::ChannelBinding::tls_server_end_point); let (channel_binding, mechanism) = if has_scram_plus { @@ -240,6 +258,8 @@ where None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), } } else if has_scram { + no_channel_binding(config)?; + match channel_binding { Some(_) => (sasl::ChannelBinding::unrequested(), sasl::SCRAM_SHA_256), None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 7db3a5b18..c3f70c41f 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -106,7 +106,11 @@ pub fn prepare( } } -fn prepare_rec(client: Arc, query: &str, types: &[Type]) -> Pin> + 'static + Send>> { +fn prepare_rec( + client: Arc, + query: &str, + types: &[Type], +) -> Pin> + 'static + Send>> { Box::pin(prepare(client, query, types)) } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 032400d9b..802e9149b 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -20,8 +20,7 @@ mod types; async fn connect_raw(s: &str) -> Result<(Client, Connection), Error> { let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let config = s.parse::().unwrap(); - // FIXME https://github.com/rust-lang/rust/issues/64391 - async move { config.connect_raw(socket, NoTls).await }.await + config.connect_raw(socket, NoTls).await } async fn connect(s: &str) -> Client { @@ -608,100 +607,20 @@ async fn query_portal() { assert_eq!(r3.len(), 0); } -/* -#[test] -fn poll_idle_running() { - struct DelayStream(Delay); - - impl Stream for DelayStream { - type Item = Vec; - type Error = tokio_postgres::Error; - - fn poll(&mut self) -> Poll>, tokio_postgres::Error> { - try_ready!(self.0.poll().map_err(|e| panic!("{}", e))); - QUERY_DONE.store(true, Ordering::SeqCst); - Ok(Async::Ready(None)) - } - } - - struct IdleFuture(tokio_postgres::Client); - - impl Future for IdleFuture { - type Item = (); - type Error = tokio_postgres::Error; - - fn poll(&mut self) -> Poll<(), tokio_postgres::Error> { - try_ready!(self.0.poll_idle()); - assert!(QUERY_DONE.load(Ordering::SeqCst)); - Ok(Async::Ready(())) - } - } - - static QUERY_DONE: AtomicBool = AtomicBool::new(false); - - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); - - let execute = client - .simple_query("CREATE TEMPORARY TABLE foo (id INT)") - .for_each(|_| Ok(())); - runtime.block_on(execute).unwrap(); - - let prepare = client.prepare("COPY foo FROM STDIN"); - let stmt = runtime.block_on(prepare).unwrap(); - let copy_in = client.copy_in( - &stmt, - &[], - DelayStream(Delay::new(Instant::now() + Duration::from_millis(10))), - ); - let copy_in = copy_in.map(|_| ()).map_err(|e| panic!("{}", e)); - runtime.spawn(copy_in); - - let future = IdleFuture(client); - runtime.block_on(future).unwrap(); +#[tokio::test] +async fn require_channel_binding() { + connect_raw("user=postgres channel_binding=require") + .await + .err() + .unwrap(); } -#[test] -fn poll_idle_new() { - struct IdleFuture { - client: tokio_postgres::Client, - prepare: Option, - } - - impl Future for IdleFuture { - type Item = (); - type Error = tokio_postgres::Error; - - fn poll(&mut self) -> Poll<(), 
tokio_postgres::Error> { - match self.prepare.take() { - Some(_future) => { - assert!(!self.client.poll_idle().unwrap().is_ready()); - Ok(Async::NotReady) - } - None => { - assert!(self.client.poll_idle().unwrap().is_ready()); - Ok(Async::Ready(())) - } - } - } - } - - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let (mut client, connection) = runtime.block_on(connect("user=postgres")).unwrap(); - let connection = connection.map_err(|e| panic!("{}", e)); - runtime.handle().spawn(connection).unwrap(); +#[tokio::test] +async fn prefer_channel_binding() { + connect("user=postgres channel_binding=prefer").await; +} - let prepare = client.prepare(""); - let future = IdleFuture { - client, - prepare: Some(prepare), - }; - runtime.block_on(future).unwrap(); +#[tokio::test] +async fn disable_channel_binding() { + connect("user=postgres channel_binding=disable").await; } -*/ From 7b230592dbb6313cac6ec0837c3f1146142cd1c5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 25 Sep 2019 17:44:45 -0700 Subject: [PATCH 243/819] Some cleanup --- tokio-postgres/src/connect_raw.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index cf80a91c4..b616d4455 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -142,11 +142,11 @@ where { match stream.try_next().await.map_err(Error::io)? { Some(Message::AuthenticationOk) => { - no_channel_binding(config)?; + can_skip_channel_binding(config)?; return Ok(()); } Some(Message::AuthenticationCleartextPassword) => { - no_channel_binding(config)?; + can_skip_channel_binding(config)?; let pass = config .password @@ -156,7 +156,7 @@ where authenticate_password(stream, pass).await?; } Some(Message::AuthenticationMd5Password(body)) => { - no_channel_binding(config)?; + can_skip_channel_binding(config)?; let user = config .user @@ -194,7 +194,7 @@ where } } -fn no_channel_binding(config: &Config) -> Result<(), Error> { +fn can_skip_channel_binding(config: &Config) -> Result<(), Error> { match config.channel_binding { config::ChannelBinding::Disable | config::ChannelBinding::Prefer => Ok(()), config::ChannelBinding::Require => Err(Error::authentication( @@ -258,7 +258,7 @@ where None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), } } else if has_scram { - no_channel_binding(config)?; + can_skip_channel_binding(config)?; match channel_binding { Some(_) => (sasl::ChannelBinding::unrequested(), sasl::SCRAM_SHA_256), From 8079ed6766e8375f25799f53883ad88d96bdabb8 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 25 Sep 2019 17:46:15 -0700 Subject: [PATCH 244/819] Check channel binding requirements when unsupported locally --- tokio-postgres/src/connect_raw.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index b616d4455..a9b9e10e6 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -255,7 +255,11 @@ where let (channel_binding, mechanism) = if has_scram_plus { match channel_binding { Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS), - None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), + None => { + can_skip_channel_binding(config)?; + + (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256) + }, } } else if has_scram { can_skip_channel_binding(config)?; From c9469ea8263d9b5468f8530c462c066b782a03c6 Mon Sep 17 
00:00:00 2001 From: Steven Fackler Date: Wed, 25 Sep 2019 17:59:33 -0700 Subject: [PATCH 245/819] Simplify logic --- tokio-postgres/src/connect_raw.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index a9b9e10e6..4ac5c667c 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -256,14 +256,10 @@ where match channel_binding { Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS), None => { - can_skip_channel_binding(config)?; - (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256) }, } } else if has_scram { - can_skip_channel_binding(config)?; - match channel_binding { Some(_) => (sasl::ChannelBinding::unrequested(), sasl::SCRAM_SHA_256), None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), @@ -272,6 +268,10 @@ where return Err(Error::authentication("unsupported SASL mechanism".into())); }; + if mechanism != sasl::SCRAM_SHA_256_PLUS { + can_skip_channel_binding(config)?; + } + let mut scram = ScramSha256::new(password, channel_binding); let mut buf = vec![]; From 680f7b8ecb8281ba622104f0db51eab46a228384 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 25 Sep 2019 18:22:59 -0700 Subject: [PATCH 246/819] Start prepping for futures closing over parameters Change the slice-consuming methods to requiring &(dyn ToSql + Sync), which makes the overall value Send. If you have non-Sync values for whatever reason, you can still use the iterator-based methods. --- tokio-postgres/src/client.rs | 16 +++++++------- tokio-postgres/src/transaction.rs | 12 +++++------ tokio-postgres/tests/test/types/mod.rs | 29 +++++++++++++++++++++++++- 3 files changed, 42 insertions(+), 15 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index ba43be6d8..56e131437 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -197,9 +197,9 @@ impl Client { pub fn query( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> impl Stream> { - let buf = query::encode(statement, params.iter().cloned()); + let buf = query::encode(statement, params.iter().map(|s| *s as _)); query::query(self.inner(), statement.clone(), buf) } @@ -229,9 +229,9 @@ impl Client { pub fn execute( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> impl Future> { - let buf = query::encode(statement, params.iter().cloned()); + let buf = query::encode(statement, params.iter().map(|s| *s as _)); query::execute(self.inner(), buf) } @@ -262,7 +262,7 @@ impl Client { pub fn copy_in( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], stream: S, ) -> impl Future> where @@ -271,7 +271,7 @@ impl Client { ::Buf: 'static + Send, S::Error: Into>, { - let buf = query::encode(statement, params.iter().cloned()); + let buf = query::encode(statement, params.iter().map(|s| *s as _)); copy_in::copy_in(self.inner(), buf, stream) } @@ -283,9 +283,9 @@ impl Client { pub fn copy_out( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> impl Stream> { - let buf = query::encode(statement, params.iter().cloned()); + let buf = query::encode(statement, params.iter().map(|s| *s as _)); copy_out::copy_out(self.inner(), buf) } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 0489f09f4..64b86abfb 100644 --- a/tokio-postgres/src/transaction.rs 
+++ b/tokio-postgres/src/transaction.rs @@ -95,7 +95,7 @@ impl<'a> Transaction<'a> { pub fn query( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> impl Stream> { self.client.query(statement, params) } @@ -119,7 +119,7 @@ impl<'a> Transaction<'a> { pub fn execute( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> impl Future> { self.client.execute(statement, params) } @@ -150,10 +150,10 @@ impl<'a> Transaction<'a> { pub fn bind( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> impl Future> { // https://github.com/rust-lang/rust/issues/63032 - let buf = bind::encode(statement, params.iter().cloned()); + let buf = bind::encode(statement, params.iter().map(|s| *s as _)); bind::bind(self.client.inner(), statement.clone(), buf) } @@ -189,7 +189,7 @@ impl<'a> Transaction<'a> { pub fn copy_in( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], stream: S, ) -> impl Future> where @@ -205,7 +205,7 @@ impl<'a> Transaction<'a> { pub fn copy_out( &mut self, statement: &Statement, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> impl Stream> { self.client.copy_out(statement, params) } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 89cce955a..09b34cc2c 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -27,7 +27,7 @@ mod uuid_07; async fn test_type(sql_type: &str, checks: &[(T, S)]) where - T: PartialEq + for<'a> FromSqlOwned + ToSql, + T: PartialEq + for<'a> FromSqlOwned + ToSql + Sync, S: fmt::Display, { let mut client = connect("user=postgres").await; @@ -656,3 +656,30 @@ async fn inet() { ) .await; } + +#[tokio::test] +async fn check_send() { + fn is_send(_: &T) {} + + let mut client = connect("user=postgres").await; + + let f = client.prepare("SELECT $1::TEXT"); + is_send(&f); + let stmt = f.await.unwrap(); + + let f = client.query(&stmt, &[&"hello"]); + is_send(&f); + + let f = client.execute(&stmt, &[&"hello"]); + is_send(&f); + + let f = client.transaction(); + is_send(&f); + let mut trans = f.await.unwrap(); + + let f = trans.query(&stmt, &[&"hello"]); + is_send(&f); + + let f = trans.execute(&stmt, &[&"hello"]); + is_send(&f); +} From 427340d7484836d33d5fce5bd45cc79cee3a8db1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 25 Sep 2019 18:30:00 -0700 Subject: [PATCH 247/819] Ensure connect future is Send --- tokio-postgres/tests/test/types/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 09b34cc2c..224a27868 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -661,7 +661,9 @@ async fn inet() { async fn check_send() { fn is_send(_: &T) {} - let mut client = connect("user=postgres").await; + let f = connect("user=postgres"); + is_send(&f); + let mut client = f.await; let f = client.prepare("SELECT $1::TEXT"); is_send(&f); From 983a71c4702fa942feabb72dd4a7140e81162425 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 25 Sep 2019 18:39:51 -0700 Subject: [PATCH 248/819] Fix postgres --- postgres/src/client.rs | 10 +++++----- postgres/src/transaction.rs | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index bb53e0a7c..4cf088a7c 100644 --- 
a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -80,7 +80,7 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result + pub fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement, { @@ -119,7 +119,7 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + pub fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement, { @@ -155,7 +155,7 @@ impl Client { pub fn query_iter<'a, T>( &'a mut self, query: &T, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> Result + 'a, Error> where T: ?Sized + ToStatement, @@ -242,7 +242,7 @@ impl Client { pub fn copy_in( &mut self, query: &T, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], reader: R, ) -> Result where @@ -275,7 +275,7 @@ impl Client { pub fn copy_out<'a, T>( &'a mut self, query: &T, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> Result where T: ?Sized + ToStatement, diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 65ac5176f..9ecbe75f6 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -43,7 +43,7 @@ impl<'a> Transaction<'a> { } /// Like `Client::execute`. - pub fn execute(&mut self, query: &T, params: &[&dyn ToSql]) -> Result + pub fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement, { @@ -52,7 +52,7 @@ impl<'a> Transaction<'a> { } /// Like `Client::query`. - pub fn query(&mut self, query: &T, params: &[&dyn ToSql]) -> Result, Error> + pub fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement, { @@ -63,7 +63,7 @@ impl<'a> Transaction<'a> { pub fn query_iter( &mut self, query: &T, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> Result, Error> where T: ?Sized + ToStatement, @@ -82,7 +82,7 @@ impl<'a> Transaction<'a> { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. 
- pub fn bind(&mut self, query: &T, params: &[&dyn ToSql]) -> Result + pub fn bind(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement, { @@ -112,7 +112,7 @@ impl<'a> Transaction<'a> { pub fn copy_in( &mut self, query: &T, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], reader: R, ) -> Result where @@ -127,7 +127,7 @@ impl<'a> Transaction<'a> { pub fn copy_out<'b, T>( &'a mut self, query: &T, - params: &[&dyn ToSql], + params: &[&(dyn ToSql + Sync)], ) -> Result where T: ?Sized + ToStatement, From de20f0fcd68eff72629ee364414de5ab6ff93de5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 26 Sep 2019 18:31:51 -0700 Subject: [PATCH 249/819] Fix bit-vec-support --- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/types/{bit_vec_05.rs => bit_vec_06.rs} | 2 +- tokio-postgres/src/types/mod.rs | 4 ++-- .../tests/test/types/{bit_vec_07.rs => bit_vec_06.rs} | 2 +- tokio-postgres/tests/test/types/mod.rs | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) rename tokio-postgres/src/types/{bit_vec_05.rs => bit_vec_06.rs} (96%) rename tokio-postgres/tests/test/types/{bit_vec_07.rs => bit_vec_06.rs} (96%) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 12187155a..362556f83 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -45,7 +45,7 @@ tokio = { version = "=0.2.0-alpha.5", default-features = false, features = ["io" tokio-executor = { version = "=0.2.0-alpha.5", optional = true } lazy_static = { version = "1.0", optional = true } -bit-vec-06 = { version = "0.6.1", package = "bit-vec", optional = true } +bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } geo-types-04 = { version = "0.4", package = "geo-types", optional = true } diff --git a/tokio-postgres/src/types/bit_vec_05.rs b/tokio-postgres/src/types/bit_vec_06.rs similarity index 96% rename from tokio-postgres/src/types/bit_vec_05.rs rename to tokio-postgres/src/types/bit_vec_06.rs index 4a0cdb91c..a68ed35a8 100644 --- a/tokio-postgres/src/types/bit_vec_05.rs +++ b/tokio-postgres/src/types/bit_vec_06.rs @@ -1,4 +1,4 @@ -use bit_vec_05::BitVec; +use bit_vec_06::BitVec; use postgres_protocol::types; use std::error::Error; diff --git a/tokio-postgres/src/types/mod.rs b/tokio-postgres/src/types/mod.rs index a5f7604c0..f938bd7cc 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/tokio-postgres/src/types/mod.rs @@ -73,8 +73,8 @@ where v.to_sql(ty, out) } -#[cfg(feature = "with-bit-vec-0_5")] -mod bit_vec_05; +#[cfg(feature = "with-bit-vec-0_6")] +mod bit_vec_06; #[cfg(feature = "with-chrono-0_4")] mod chrono_04; #[cfg(feature = "with-eui48-0_4")] diff --git a/tokio-postgres/tests/test/types/bit_vec_07.rs b/tokio-postgres/tests/test/types/bit_vec_06.rs similarity index 96% rename from tokio-postgres/tests/test/types/bit_vec_07.rs rename to tokio-postgres/tests/test/types/bit_vec_06.rs index 5ea4a00bb..4d01dc2f2 100644 --- a/tokio-postgres/tests/test/types/bit_vec_07.rs +++ b/tokio-postgres/tests/test/types/bit_vec_06.rs @@ -1,4 +1,4 @@ -use bit_vec_07::BitVec; +use bit_vec_06::BitVec; use crate::types::test_type; diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 224a27868..b89434fa4 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -12,8 +12,8 @@ use tokio_postgres::types::{FromSql, 
FromSqlOwned, IsNull, Kind, ToSql, Type, Wr use crate::connect; -#[cfg(feature = "with-bit-vec-0_7")] -mod bit_vec_07; +#[cfg(feature = "with-bit-vec-0_6")] +mod bit_vec_06; #[cfg(feature = "with-chrono-0_4")] mod chrono_04; #[cfg(feature = "with-eui48-0_4")] From 30bd89c5249a4be9a74b344f7efaf58c6010241b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 30 Sep 2019 18:26:23 -0700 Subject: [PATCH 250/819] Upgrade tokio and futures-preview --- postgres-native-tls/Cargo.toml | 8 ++++---- postgres-openssl/Cargo.toml | 8 ++++---- postgres/Cargo.toml | 8 ++++---- tokio-postgres/Cargo.toml | 8 ++++---- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 7c436fbac..c56b0831c 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures-preview = "=0.3.0-alpha.18" +futures-preview = "=0.3.0-alpha.19" native-tls = "0.2" -tokio-io = "=0.2.0-alpha.5" -tokio-tls = "=0.3.0-alpha.5" +tokio-io = "=0.2.0-alpha.6" +tokio-tls = "=0.3.0-alpha.6" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.5" +tokio = "=0.2.0-alpha.6" postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 695befeb2..60be19c5b 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures-preview = "=0.3.0-alpha.18" +futures-preview = "=0.3.0-alpha.19" openssl = "0.10" -tokio-io = "=0.2.0-alpha.5" -tokio-openssl = "=0.4.0-alpha.5" +tokio-io = "=0.2.0-alpha.6" +tokio-openssl = "=0.4.0-alpha.6" tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.5" +tokio = "=0.2.0-alpha.6" postgres = { version = "0.16.0-rc.1", path = "../postgres" } diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 582c462ee..d07664eba 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -30,14 +30,14 @@ runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] [dependencies] bytes = "0.4" fallible-iterator = "0.2" -futures-preview = "=0.3.0-alpha.18" +futures-preview = "=0.3.0-alpha.19" pin-utils = "=0.1.0-alpha.4" tokio-postgres = { version = "0.4.0-rc.2", path = "../tokio-postgres", default-features = false } -tokio-executor = "=0.2.0-alpha.5" +tokio-executor = "=0.2.0-alpha.6" -tokio = { version = "=0.2.0-alpha.5", optional = true } +tokio = { version = "=0.2.0-alpha.6", optional = true } lazy_static = { version = "1.0", optional = true } log = { version = "0.4", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.5" +tokio = "=0.2.0-alpha.6" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 362556f83..63e174d58 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -33,16 +33,16 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] [dependencies] bytes = "0.4" fallible-iterator = "0.2" -futures-preview = { version = "=0.3.0-alpha.18", features = ["nightly", "async-await"] } +futures-preview = { version = "=0.3.0-alpha.19", features = ["async-await"] } log = "0.4" parking_lot = "0.9" percent-encoding = "1.0" pin-utils = "=0.1.0-alpha.4" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = 
"../postgres-protocol" } -tokio = { version = "=0.2.0-alpha.5", default-features = false, features = ["io", "codec"] } +tokio = { version = "=0.2.0-alpha.6", default-features = false, features = ["io", "codec"] } -tokio-executor = { version = "=0.2.0-alpha.5", optional = true } +tokio-executor = { version = "=0.2.0-alpha.6", optional = true } lazy_static = { version = "1.0", optional = true } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } @@ -54,5 +54,5 @@ serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.5" +tokio = "=0.2.0-alpha.6" env_logger = "0.5" From e155af64198cadcb623b67d7b15c8e52087f5500 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 3 Oct 2019 18:09:10 -0700 Subject: [PATCH 251/819] Fix unix socket connection Closes #491 --- tokio-postgres/src/connect_socket.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index e87fa0b37..7bd858dd9 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -30,6 +30,7 @@ pub(crate) async fn connect_socket( } #[cfg(unix)] Host::Unix(path) => { + let path = path.join(format!(".s.PGSQL.{}", port)); let socket = connect_with_timeout(UnixStream::connect(path), connect_timeout).await?; Ok(Socket::new_unix(socket)) } From d8b54126022c4406e465fe635105a005ccf2ec59 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 3 Oct 2019 18:25:25 -0700 Subject: [PATCH 252/819] Upgrade errcodes to Postgres 12 --- codegen/src/errcodes.txt | 20 +- postgres/src/config.rs | 2 +- tokio-postgres/src/connect_raw.rs | 4 +- tokio-postgres/src/error/sqlstate.rs | 587 +++++++++++++++------------ 4 files changed, 347 insertions(+), 266 deletions(-) diff --git a/codegen/src/errcodes.txt b/codegen/src/errcodes.txt index 4f3547176..867e98b69 100644 --- a/codegen/src/errcodes.txt +++ b/codegen/src/errcodes.txt @@ -2,7 +2,7 @@ # errcodes.txt # PostgreSQL error codes # -# Copyright (c) 2003-2017, PostgreSQL Global Development Group +# Copyright (c) 2003-2019, PostgreSQL Global Development Group # # This list serves as the basis for generating source files containing error # codes. 
It is kept in a common format to make sure all these source files have @@ -18,7 +18,7 @@ # src/pl/tcl/pltclerrcodes.h # the same, for PL/Tcl # -# doc/src/sgml/errcodes-list.sgml +# doc/src/sgml/errcodes-table.sgml # a SGML table of error codes for inclusion in the documentation # # The format of this file is one error code per line, with the following @@ -177,6 +177,7 @@ Section: Class 22 - Data Exception 22P06 E ERRCODE_NONSTANDARD_USE_OF_ESCAPE_CHARACTER nonstandard_use_of_escape_character 22010 E ERRCODE_INVALID_INDICATOR_PARAMETER_VALUE invalid_indicator_parameter_value 22023 E ERRCODE_INVALID_PARAMETER_VALUE invalid_parameter_value +22013 E ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE invalid_preceding_or_following_size 2201B E ERRCODE_INVALID_REGULAR_EXPRESSION invalid_regular_expression 2201W E ERRCODE_INVALID_ROW_COUNT_IN_LIMIT_CLAUSE invalid_row_count_in_limit_clause 2201X E ERRCODE_INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE invalid_row_count_in_result_offset_clause @@ -205,6 +206,21 @@ Section: Class 22 - Data Exception 2200N E ERRCODE_INVALID_XML_CONTENT invalid_xml_content 2200S E ERRCODE_INVALID_XML_COMMENT invalid_xml_comment 2200T E ERRCODE_INVALID_XML_PROCESSING_INSTRUCTION invalid_xml_processing_instruction +22030 E ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE duplicate_json_object_key_value +22032 E ERRCODE_INVALID_JSON_TEXT invalid_json_text +22033 E ERRCODE_INVALID_SQL_JSON_SUBSCRIPT invalid_sql_json_subscript +22034 E ERRCODE_MORE_THAN_ONE_SQL_JSON_ITEM more_than_one_sql_json_item +22035 E ERRCODE_NO_SQL_JSON_ITEM no_sql_json_item +22036 E ERRCODE_NON_NUMERIC_SQL_JSON_ITEM non_numeric_sql_json_item +22037 E ERRCODE_NON_UNIQUE_KEYS_IN_A_JSON_OBJECT non_unique_keys_in_a_json_object +22038 E ERRCODE_SINGLETON_SQL_JSON_ITEM_REQUIRED singleton_sql_json_item_required +22039 E ERRCODE_SQL_JSON_ARRAY_NOT_FOUND sql_json_array_not_found +2203A E ERRCODE_SQL_JSON_MEMBER_NOT_FOUND sql_json_member_not_found +2203B E ERRCODE_SQL_JSON_NUMBER_NOT_FOUND sql_json_number_not_found +2203C E ERRCODE_SQL_JSON_OBJECT_NOT_FOUND sql_json_object_not_found +2203D E ERRCODE_TOO_MANY_JSON_ARRAY_ELEMENTS too_many_json_array_elements +2203E E ERRCODE_TOO_MANY_JSON_OBJECT_MEMBERS too_many_json_object_members +2203F E ERRCODE_SQL_JSON_SCALAR_REQUIRED sql_json_scalar_required Section: Class 23 - Integrity Constraint Violation diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 354a99ce1..a7702ef88 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -14,7 +14,7 @@ use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; #[doc(inline)] -pub use tokio_postgres::config::{SslMode, TargetSessionAttrs, ChannelBinding}; +pub use tokio_postgres::config::{ChannelBinding, SslMode, TargetSessionAttrs}; use crate::{Client, RUNTIME}; diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 4ac5c667c..dbbc71a59 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -255,9 +255,7 @@ where let (channel_binding, mechanism) = if has_scram_plus { match channel_binding { Some(channel_binding) => (channel_binding, sasl::SCRAM_SHA_256_PLUS), - None => { - (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256) - }, + None => (sasl::ChannelBinding::unsupported(), sasl::SCRAM_SHA_256), } } else if has_scram { match channel_binding { diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 60c18e42e..b6aff0efe 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ 
b/tokio-postgres/src/error/sqlstate.rs @@ -190,6 +190,9 @@ impl SqlState { /// 22023 pub const INVALID_PARAMETER_VALUE: SqlState = SqlState(Cow::Borrowed("22023")); + /// 22013 + pub const INVALID_PRECEDING_OR_FOLLOWING_SIZE: SqlState = SqlState(Cow::Borrowed("22013")); + /// 2201B pub const INVALID_REGULAR_EXPRESSION: SqlState = SqlState(Cow::Borrowed("2201B")); @@ -275,6 +278,51 @@ impl SqlState { /// 2200T pub const INVALID_XML_PROCESSING_INSTRUCTION: SqlState = SqlState(Cow::Borrowed("2200T")); + /// 22030 + pub const DUPLICATE_JSON_OBJECT_KEY_VALUE: SqlState = SqlState(Cow::Borrowed("22030")); + + /// 22032 + pub const INVALID_JSON_TEXT: SqlState = SqlState(Cow::Borrowed("22032")); + + /// 22033 + pub const INVALID_SQL_JSON_SUBSCRIPT: SqlState = SqlState(Cow::Borrowed("22033")); + + /// 22034 + pub const MORE_THAN_ONE_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22034")); + + /// 22035 + pub const NO_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22035")); + + /// 22036 + pub const NON_NUMERIC_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22036")); + + /// 22037 + pub const NON_UNIQUE_KEYS_IN_A_JSON_OBJECT: SqlState = SqlState(Cow::Borrowed("22037")); + + /// 22038 + pub const SINGLETON_SQL_JSON_ITEM_REQUIRED: SqlState = SqlState(Cow::Borrowed("22038")); + + /// 22039 + pub const SQL_JSON_ARRAY_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("22039")); + + /// 2203A + pub const SQL_JSON_MEMBER_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("2203A")); + + /// 2203B + pub const SQL_JSON_NUMBER_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("2203B")); + + /// 2203C + pub const SQL_JSON_OBJECT_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("2203C")); + + /// 2203D + pub const TOO_MANY_JSON_ARRAY_ELEMENTS: SqlState = SqlState(Cow::Borrowed("2203D")); + + /// 2203E + pub const TOO_MANY_JSON_OBJECT_MEMBERS: SqlState = SqlState(Cow::Borrowed("2203E")); + + /// 2203F + pub const SQL_JSON_SCALAR_REQUIRED: SqlState = SqlState(Cow::Borrowed("2203F")); + /// 23000 pub const INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Cow::Borrowed("23000")); @@ -777,299 +825,318 @@ impl SqlState { } #[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { - key: 3213172566270843353, + key: 732231254413039614, disps: ::phf::Slice::Static(&[ - (1, 16), - (1, 97), - (0, 146), + (0, 7), + (2, 136), + (0, 13), + (2, 32), + (0, 204), + (0, 46), + (0, 154), + (1, 221), + (0, 42), + (2, 246), (0, 0), - (1, 0), - (0, 217), - (3, 134), + (0, 1), + (1, 18), + (2, 7), + (0, 0), + (0, 1), + (0, 4), + (0, 17), (0, 2), - (0, 6), - (0, 32), - (1, 99), - (1, 227), - (0, 6), - (0, 163), - (0, 89), - (0, 5), + (0, 22), + (0, 93), + (1, 92), + (0, 145), + (0, 1), + (9, 81), + (0, 26), + (1, 53), + (0, 62), + (2, 173), + (0, 10), (0, 3), - (0, 200), - (4, 99), - (0, 32), - (0, 19), - (0, 82), - (0, 54), - (1, 126), - (0, 11), - (0, 83), - (6, 102), - (0, 67), - (4, 162), - (0, 13), - (0, 116), - (11, 57), - (0, 210), - (0, 4), - (4, 127), - (1, 133), - (1, 158), - (0, 180), - (2, 201), - (0, 148), - (4, 135), - (0, 5), - (1, 1), - (0, 0), - (0, 191), - (0, 171), - (7, 38), + (0, 204), + (0, 26), + (0, 2), + (3, 80), + (1, 206), + (1, 61), + (0, 103), + (0, 3), + (25, 171), (0, 0), (0, 0), + (4, 107), + (0, 129), + (0, 114), + (4, 201), + (20, 163), + (14, 72), + (0, 147), + (6, 113), + (5, 170), + (0, 23), ]), entries: ::phf::Slice::Static(&[ - ("22022", SqlState::INDICATOR_OVERFLOW), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("25P02", 
SqlState::IN_FAILED_SQL_TRANSACTION), - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("42622", SqlState::NAME_TOO_LONG), - ("42P19", SqlState::INVALID_RECURSION), - ("F0000", SqlState::CONFIG_FILE_ERROR), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), - ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), - ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("53200", SqlState::OUT_OF_MEMORY), - ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("3D000", SqlState::INVALID_CATALOG_NAME), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("08006", SqlState::CONNECTION_FAILURE), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), - ("P0004", SqlState::ASSERT_FAILURE), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("57014", SqlState::QUERY_CANCELED), - ("57P01", SqlState::ADMIN_SHUTDOWN), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("54001", SqlState::STATEMENT_TOO_COMPLEX), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("0L000", SqlState::INVALID_GRANTOR), - ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), + ("22012", SqlState::DIVISION_BY_ZERO), ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("42939", SqlState::RESERVED_NAME), - ("57P02", SqlState::CRASH_SHUTDOWN), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), - ("XX002", SqlState::INDEX_CORRUPTED), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("42602", SqlState::INVALID_NAME), - ("55006", SqlState::OBJECT_IN_USE), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("02000", SqlState::NO_DATA), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("P0002", SqlState::NO_DATA_FOUND), - ("P0003", SqlState::TOO_MANY_ROWS), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - ("42803", SqlState::GROUPING_ERROR), - ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - 
("2200M", SqlState::INVALID_XML_DOCUMENT), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("23P01", SqlState::EXCLUSION_VIOLATION), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("42846", SqlState::CANNOT_COERCE), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("54011", SqlState::TOO_MANY_COLUMNS), ("53100", SqlState::DISK_FULL), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("58P02", SqlState::DUPLICATE_FILE), - ("P0001", SqlState::RAISE_EXCEPTION), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("42601", SqlState::SYNTAX_ERROR), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("2202E", SqlState::ARRAY_ELEMENT_ERROR), + ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), + ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("42P20", SqlState::WINDOWING_ERROR), ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("58000", SqlState::SYSTEM_ERROR), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), - ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("23514", SqlState::CHECK_VIOLATION), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("20000", SqlState::CASE_NOT_FOUND), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), + ("42804", SqlState::DATATYPE_MISMATCH), + ("54001", SqlState::STATEMENT_TOO_COMPLEX), + ("2200L", SqlState::NOT_AN_XML_DOCUMENT), + ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), + ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), + ("2202E", SqlState::ARRAY_ELEMENT_ERROR), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), + ("42702", SqlState::AMBIGUOUS_COLUMN), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), ("XX001", SqlState::DATA_CORRUPTED), - ("54011", SqlState::TOO_MANY_COLUMNS), - ("57P04", SqlState::DATABASE_DROPPED), - ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), - ("25004", 
SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), ("42701", SqlState::DUPLICATE_COLUMN), - ("42P18", SqlState::INDETERMINATE_DATATYPE), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), - ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("22011", SqlState::SUBSTRING_ERROR), - ("42P20", SqlState::WINDOWING_ERROR), - ("42P12", SqlState::INVALID_DATABASE_DEFINITION), - ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), + ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), + ("42601", SqlState::SYNTAX_ERROR), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), + ("22022", SqlState::INDICATOR_OVERFLOW), + ("42P22", SqlState::INDETERMINATE_COLLATION), + ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), + ("42P02", SqlState::UNDEFINED_PARAMETER), + ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("42712", SqlState::DUPLICATE_ALIAS), + ("28P01", SqlState::INVALID_PASSWORD), + ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), + ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), + ("55006", SqlState::OBJECT_IN_USE), + ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), + ("HV00B", SqlState::FDW_INVALID_HANDLE), + ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("34000", SqlState::INVALID_CURSOR_NAME), + ("53200", SqlState::OUT_OF_MEMORY), + ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), - ("23001", SqlState::RESTRICT_VIOLATION), ("428C9", SqlState::GENERATED_ALWAYS), - ("42723", SqlState::DUPLICATE_FUNCTION), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), - ("42P07", SqlState::DUPLICATE_TABLE), + ("57P04", SqlState::DATABASE_DROPPED), + ("55P03", SqlState::LOCK_NOT_AVAILABLE), + ("54023", SqlState::TOO_MANY_ARGUMENTS), + ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("08P01", SqlState::PROTOCOL_VIOLATION), - ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("HV014", SqlState::FDW_TOO_MANY_HANDLES), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), + ("P0002", SqlState::NO_DATA_FOUND), + ("42723", SqlState::DUPLICATE_FUNCTION), + ("3F000", SqlState::INVALID_SCHEMA_NAME), + ("22007", SqlState::INVALID_DATETIME_FORMAT), + ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("42809", SqlState::WRONG_OBJECT_TYPE), + ("42P03", SqlState::DUPLICATE_CURSOR), + ("HV00R", 
SqlState::FDW_TABLE_NOT_FOUND), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("58000", SqlState::SYSTEM_ERROR), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), + ("02000", SqlState::NO_DATA), + ("22027", SqlState::TRIM_ERROR), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), + ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), + ("F0001", SqlState::LOCK_FILE_EXISTS), + ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), + ("57P02", SqlState::CRASH_SHUTDOWN), ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("XX000", SqlState::INTERNAL_ERROR), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), ("2200N", SqlState::INVALID_XML_CONTENT), - ("23502", SqlState::NOT_NULL_VIOLATION), - ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("42P04", SqlState::DUPLICATE_DATABASE), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("28P01", SqlState::INVALID_PASSWORD), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("57000", SqlState::OPERATOR_INTERVENTION), - ("P0000", SqlState::PLPGSQL_ERROR), - ("42712", SqlState::DUPLICATE_ALIAS), + ("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + ("08P01", SqlState::PROTOCOL_VIOLATION), + ("42602", SqlState::INVALID_NAME), + ("25000", SqlState::INVALID_TRANSACTION_STATE), + ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("22012", SqlState::DIVISION_BY_ZERO), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), - ("42804", SqlState::DATATYPE_MISMATCH), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), - ("2200S", SqlState::INVALID_XML_COMMENT), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("42710", SqlState::DUPLICATE_OBJECT), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), - ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("42P16", SqlState::INVALID_TABLE_DEFINITION), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("22007", SqlState::INVALID_DATETIME_FORMAT), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("58P02", SqlState::DUPLICATE_FILE), + ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("42P07", SqlState::DUPLICATE_TABLE), + ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("42725", SqlState::AMBIGUOUS_FUNCTION), 
("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("HV000", SqlState::FDW_ERROR), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("23505", SqlState::UNIQUE_VIOLATION), + ("22035", SqlState::NO_SQL_JSON_ITEM), + ("22011", SqlState::SUBSTRING_ERROR), + ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("2200D", SqlState::INVALID_ESCAPE_OCTET), + ("57000", SqlState::OPERATOR_INTERVENTION), + ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), + ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), + ("XX000", SqlState::INTERNAL_ERROR), ("42703", SqlState::UNDEFINED_COLUMN), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), - ("01000", SqlState::WARNING), - ("42P01", SqlState::UNDEFINED_TABLE), - ("22027", SqlState::TRIM_ERROR), + ("42704", SqlState::UNDEFINED_OBJECT), + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), + ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), + ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("P0003", SqlState::TOO_MANY_ROWS), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), + ("08000", SqlState::CONNECTION_EXCEPTION), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + ("0A000", SqlState::FEATURE_NOT_SUPPORTED), + ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), + ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), + ("42883", SqlState::UNDEFINED_FUNCTION), + ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("08006", SqlState::CONNECTION_FAILURE), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), + ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("0F000", SqlState::LOCATOR_EXCEPTION), + ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), ("00000", SqlState::SUCCESSFUL_COMPLETION), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + ("24000", SqlState::INVALID_CURSOR_STATE), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("42710", SqlState::DUPLICATE_OBJECT), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("42P21", SqlState::COLLATION_MISMATCH), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("0F001", SqlState::L_E_INVALID_SPECIFICATION), + ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), + ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), + ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), + ("22P01", 
SqlState::FLOATING_POINT_EXCEPTION), + ("42P06", SqlState::DUPLICATE_SCHEMA), + ("P0004", SqlState::ASSERT_FAILURE), ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("3B000", SqlState::SAVEPOINT_EXCEPTION), + ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), + ("P0001", SqlState::RAISE_EXCEPTION), + ("23514", SqlState::CHECK_VIOLATION), + ("57P03", SqlState::CANNOT_CONNECT_NOW), ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), ("58P01", SqlState::UNDEFINED_FILE), + ("42P01", SqlState::UNDEFINED_TABLE), + ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), + ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("42704", SqlState::UNDEFINED_OBJECT), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("42P22", SqlState::INDETERMINATE_COLLATION), - ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), + ("58030", SqlState::IO_ERROR), + ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), + ("42846", SqlState::CANNOT_COERCE), + ("42P12", SqlState::INVALID_DATABASE_DEFINITION), + ("42P08", SqlState::AMBIGUOUS_PARAMETER), + ("3B001", SqlState::S_E_INVALID_SPECIFICATION), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("21000", SqlState::CARDINALITY_VIOLATION), ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("57P03", SqlState::CANNOT_CONNECT_NOW), - ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), + ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("HV00P", SqlState::FDW_NO_SCHEMAS), + ("P0000", SqlState::PLPGSQL_ERROR), + ("23P01", SqlState::EXCLUSION_VIOLATION), + ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), + ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("F0000", SqlState::CONFIG_FILE_ERROR), + ("3D000", SqlState::INVALID_CATALOG_NAME), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), + ("HV000", SqlState::FDW_ERROR), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("42803", SqlState::GROUPING_ERROR), ("22000", SqlState::DATA_EXCEPTION), - ("58030", SqlState::IO_ERROR), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("42P03", SqlState::DUPLICATE_CURSOR), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("23505", SqlState::UNIQUE_VIOLATION), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), + ("42939", SqlState::RESERVED_NAME), + ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("57014", SqlState::QUERY_CANCELED), + ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), + ("23001", SqlState::RESTRICT_VIOLATION), + ("0L000", SqlState::INVALID_GRANTOR), + ("2203D", 
SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), + ("42P04", SqlState::DUPLICATE_DATABASE), + ("42830", SqlState::INVALID_FOREIGN_KEY), + ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), + ("40000", SqlState::TRANSACTION_ROLLBACK), + ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), + ("XX002", SqlState::INDEX_CORRUPTED), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("42P16", SqlState::INVALID_TABLE_DEFINITION), + ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("23502", SqlState::NOT_NULL_VIOLATION), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), + ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), + ("22P04", SqlState::BAD_COPY_FILE_FORMAT), + ("57P01", SqlState::ADMIN_SHUTDOWN), + ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), + ("01000", SqlState::WARNING), + ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), + ("22032", SqlState::INVALID_JSON_TEXT), + ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("42P21", SqlState::COLLATION_MISMATCH), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), + ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("42P19", SqlState::INVALID_RECURSION), + ("2200S", SqlState::INVALID_XML_COMMENT), + ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), + ("42622", SqlState::NAME_TOO_LONG), + ("20000", SqlState::CASE_NOT_FOUND), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), ]), }; From 42f1f9675cddd1e8f10d29e26c48d0c4c2f3c9a9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 7 Oct 2019 16:37:25 -0700 Subject: [PATCH 253/819] Update codegen to Postgres 12 They've removed the old header files so some updates were required --- codegen/src/pg_range.dat | 31 ++ codegen/src/pg_range.h | 85 --- codegen/src/pg_type.dat | 598 +++++++++++++++++++++ codegen/src/pg_type.h | 752 --------------------------- codegen/src/type_gen.rs | 240 +++++++-- tokio-postgres/src/types/type_gen.rs | 124 ++--- 6 files changed, 871 insertions(+), 959 deletions(-) create mode 100644 codegen/src/pg_range.dat delete mode 100644 codegen/src/pg_range.h create mode 100644 codegen/src/pg_type.dat delete mode 100644 codegen/src/pg_type.h diff --git a/codegen/src/pg_range.dat b/codegen/src/pg_range.dat new file mode 100644 index 000000000..dd9baa267 --- /dev/null +++ b/codegen/src/pg_range.dat @@ -0,0 +1,31 @@ +#---------------------------------------------------------------------- +# +# pg_range.dat +# Initial contents of the pg_range system catalog. 
+# +# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_range.dat +# +#---------------------------------------------------------------------- + +[ + +{ rngtypid => 'int4range', rngsubtype => 'int4', rngsubopc => 'btree/int4_ops', + rngcanonical => 'int4range_canonical', rngsubdiff => 'int4range_subdiff' }, +{ rngtypid => 'numrange', rngsubtype => 'numeric', + rngsubopc => 'btree/numeric_ops', rngcanonical => '-', + rngsubdiff => 'numrange_subdiff' }, +{ rngtypid => 'tsrange', rngsubtype => 'timestamp', + rngsubopc => 'btree/timestamp_ops', rngcanonical => '-', + rngsubdiff => 'tsrange_subdiff' }, +{ rngtypid => 'tstzrange', rngsubtype => 'timestamptz', + rngsubopc => 'btree/timestamptz_ops', rngcanonical => '-', + rngsubdiff => 'tstzrange_subdiff' }, +{ rngtypid => 'daterange', rngsubtype => 'date', rngsubopc => 'btree/date_ops', + rngcanonical => 'daterange_canonical', rngsubdiff => 'daterange_subdiff' }, +{ rngtypid => 'int8range', rngsubtype => 'int8', rngsubopc => 'btree/int8_ops', + rngcanonical => 'int8range_canonical', rngsubdiff => 'int8range_subdiff' }, + +] diff --git a/codegen/src/pg_range.h b/codegen/src/pg_range.h deleted file mode 100644 index 4ed57fe2e..000000000 --- a/codegen/src/pg_range.h +++ /dev/null @@ -1,85 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_range.h - * definition of the system "range" relation (pg_range) - * along with the relation's initial contents. - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_range.h - * - * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... - * - *------------------------------------------------------------------------- - */ -#ifndef PG_RANGE_H -#define PG_RANGE_H - -#include "catalog/genbki.h" - -/* ---------------- - * pg_range definition. cpp turns this into - * typedef struct FormData_pg_range - * ---------------- - */ -#define RangeRelationId 3541 - -CATALOG(pg_range,3541) BKI_WITHOUT_OIDS -{ - Oid rngtypid; /* OID of owning range type */ - Oid rngsubtype; /* OID of range's element type (subtype) */ - Oid rngcollation; /* collation for this range type, or 0 */ - Oid rngsubopc; /* subtype's btree opclass */ - regproc rngcanonical; /* canonicalize range, or 0 */ - regproc rngsubdiff; /* subtype difference as a float8, or 0 */ -} FormData_pg_range; - -/* ---------------- - * Form_pg_range corresponds to a pointer to a tuple with - * the format of pg_range relation. 
- * ---------------- - */ -typedef FormData_pg_range *Form_pg_range; - -/* ---------------- - * compiler constants for pg_range - * ---------------- - */ -#define Natts_pg_range 6 -#define Anum_pg_range_rngtypid 1 -#define Anum_pg_range_rngsubtype 2 -#define Anum_pg_range_rngcollation 3 -#define Anum_pg_range_rngsubopc 4 -#define Anum_pg_range_rngcanonical 5 -#define Anum_pg_range_rngsubdiff 6 - - -/* ---------------- - * initial contents of pg_range - * ---------------- - */ -DATA(insert ( 3904 23 0 1978 int4range_canonical int4range_subdiff)); -DATA(insert ( 3906 1700 0 3125 - numrange_subdiff)); -DATA(insert ( 3908 1114 0 3128 - tsrange_subdiff)); -DATA(insert ( 3910 1184 0 3127 - tstzrange_subdiff)); -DATA(insert ( 3912 1082 0 3122 daterange_canonical daterange_subdiff)); -DATA(insert ( 3926 20 0 3124 int8range_canonical int8range_subdiff)); - - -/* - * prototypes for functions in pg_range.c - */ - -extern void RangeCreate(Oid rangeTypeOid, Oid rangeSubType, Oid rangeCollation, - Oid rangeSubOpclass, RegProcedure rangeCanonical, - RegProcedure rangeSubDiff); -extern void RangeDelete(Oid rangeTypeOid); - -#endif /* PG_RANGE_H */ diff --git a/codegen/src/pg_type.dat b/codegen/src/pg_type.dat new file mode 100644 index 000000000..be49e0011 --- /dev/null +++ b/codegen/src/pg_type.dat @@ -0,0 +1,598 @@ +#---------------------------------------------------------------------- +# +# pg_type.dat +# Initial contents of the pg_type system catalog. +# +# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_type.dat +# +#---------------------------------------------------------------------- + +[ + +# For types used in the system catalogs, make sure the values here match +# TypInfo[] in bootstrap.c. + +# OID symbol macro names for pg_type OIDs are generated by genbki.pl +# according to the following rule, so you don't need to specify them +# here: +# foo_bar -> FOO_BAROID +# _foo_bar -> FOO_BARARRAYOID +# +# The only oid_symbol entries in this file are for names that don't match +# this rule, and are grandfathered in. + +# To autogenerate an array type, add 'array_type_oid => 'nnnn' to the element +# type, which will instruct genbki.pl to generate a BKI entry for it. +# In a few cases, the array type's properties don't match the normal pattern +# so it can't be autogenerated; in such cases do not write array_type_oid. + +# Once upon a time these entries were ordered by OID. Lately it's often +# been the custom to insert new entries adjacent to related older entries. +# Try to do one or the other though, don't just insert entries at random. 
+ +# OIDS 1 - 99 + +{ oid => '16', array_type_oid => '1000', + descr => 'boolean, \'true\'/\'false\'', + typname => 'bool', typlen => '1', typbyval => 't', typcategory => 'B', + typispreferred => 't', typinput => 'boolin', typoutput => 'boolout', + typreceive => 'boolrecv', typsend => 'boolsend', typalign => 'c' }, +{ oid => '17', array_type_oid => '1001', + descr => 'variable-length string, binary values escaped', + typname => 'bytea', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'byteain', typoutput => 'byteaout', typreceive => 'bytearecv', + typsend => 'byteasend', typalign => 'i', typstorage => 'x' }, +{ oid => '18', array_type_oid => '1002', descr => 'single character', + typname => 'char', typlen => '1', typbyval => 't', typcategory => 'S', + typinput => 'charin', typoutput => 'charout', typreceive => 'charrecv', + typsend => 'charsend', typalign => 'c' }, +{ oid => '19', array_type_oid => '1003', + descr => '63-byte type for storing system identifiers', + typname => 'name', typlen => 'NAMEDATALEN', typbyval => 'f', + typcategory => 'S', typelem => 'char', typinput => 'namein', + typoutput => 'nameout', typreceive => 'namerecv', typsend => 'namesend', + typalign => 'c', typcollation => 'C' }, +{ oid => '20', array_type_oid => '1016', + descr => '~18 digit integer, 8-byte storage', + typname => 'int8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'N', typinput => 'int8in', typoutput => 'int8out', + typreceive => 'int8recv', typsend => 'int8send', typalign => 'd' }, +{ oid => '21', array_type_oid => '1005', + descr => '-32 thousand to 32 thousand, 2-byte storage', + typname => 'int2', typlen => '2', typbyval => 't', typcategory => 'N', + typinput => 'int2in', typoutput => 'int2out', typreceive => 'int2recv', + typsend => 'int2send', typalign => 's' }, +{ oid => '22', array_type_oid => '1006', + descr => 'array of int2, used in system tables', + typname => 'int2vector', typlen => '-1', typbyval => 'f', typcategory => 'A', + typelem => 'int2', typinput => 'int2vectorin', typoutput => 'int2vectorout', + typreceive => 'int2vectorrecv', typsend => 'int2vectorsend', + typalign => 'i' }, +{ oid => '23', array_type_oid => '1007', + descr => '-2 billion to 2 billion integer, 4-byte storage', + typname => 'int4', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'int4in', typoutput => 'int4out', typreceive => 'int4recv', + typsend => 'int4send', typalign => 'i' }, +{ oid => '24', array_type_oid => '1008', descr => 'registered procedure', + typname => 'regproc', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regprocin', typoutput => 'regprocout', + typreceive => 'regprocrecv', typsend => 'regprocsend', typalign => 'i' }, +{ oid => '25', array_type_oid => '1009', + descr => 'variable-length string, no limit specified', + typname => 'text', typlen => '-1', typbyval => 'f', typcategory => 'S', + typispreferred => 't', typinput => 'textin', typoutput => 'textout', + typreceive => 'textrecv', typsend => 'textsend', typalign => 'i', + typstorage => 'x', typcollation => 'default' }, +{ oid => '26', array_type_oid => '1028', + descr => 'object identifier(oid), maximum 4 billion', + typname => 'oid', typlen => '4', typbyval => 't', typcategory => 'N', + typispreferred => 't', typinput => 'oidin', typoutput => 'oidout', + typreceive => 'oidrecv', typsend => 'oidsend', typalign => 'i' }, +{ oid => '27', array_type_oid => '1010', + descr => '(block, offset), physical location of tuple', + typname => 'tid', typlen => '6', typbyval => 
'f', typcategory => 'U', + typinput => 'tidin', typoutput => 'tidout', typreceive => 'tidrecv', + typsend => 'tidsend', typalign => 's' }, +{ oid => '28', array_type_oid => '1011', descr => 'transaction id', + typname => 'xid', typlen => '4', typbyval => 't', typcategory => 'U', + typinput => 'xidin', typoutput => 'xidout', typreceive => 'xidrecv', + typsend => 'xidsend', typalign => 'i' }, +{ oid => '29', array_type_oid => '1012', + descr => 'command identifier type, sequence in transaction id', + typname => 'cid', typlen => '4', typbyval => 't', typcategory => 'U', + typinput => 'cidin', typoutput => 'cidout', typreceive => 'cidrecv', + typsend => 'cidsend', typalign => 'i' }, +{ oid => '30', array_type_oid => '1013', + descr => 'array of oids, used in system tables', + typname => 'oidvector', typlen => '-1', typbyval => 'f', typcategory => 'A', + typelem => 'oid', typinput => 'oidvectorin', typoutput => 'oidvectorout', + typreceive => 'oidvectorrecv', typsend => 'oidvectorsend', typalign => 'i' }, + +# hand-built rowtype entries for bootstrapped catalogs +# NB: OIDs assigned here must match the BKI_ROWTYPE_OID declarations +{ oid => '71', + typname => 'pg_type', typlen => '-1', typbyval => 'f', typtype => 'c', + typcategory => 'C', typrelid => 'pg_type', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, +{ oid => '75', + typname => 'pg_attribute', typlen => '-1', typbyval => 'f', typtype => 'c', + typcategory => 'C', typrelid => 'pg_attribute', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, +{ oid => '81', + typname => 'pg_proc', typlen => '-1', typbyval => 'f', typtype => 'c', + typcategory => 'C', typrelid => 'pg_proc', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, +{ oid => '83', + typname => 'pg_class', typlen => '-1', typbyval => 'f', typtype => 'c', + typcategory => 'C', typrelid => 'pg_class', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, + +# OIDS 100 - 199 + +{ oid => '114', array_type_oid => '199', descr => 'JSON stored as text', + typname => 'json', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'json_in', typoutput => 'json_out', typreceive => 'json_recv', + typsend => 'json_send', typalign => 'i', typstorage => 'x' }, +{ oid => '142', array_type_oid => '143', descr => 'XML content', + typname => 'xml', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'xml_in', typoutput => 'xml_out', typreceive => 'xml_recv', + typsend => 'xml_send', typalign => 'i', typstorage => 'x' }, +{ oid => '194', oid_symbol => 'PGNODETREEOID', + descr => 'string representing an internal node tree', + typname => 'pg_node_tree', typlen => '-1', typbyval => 'f', + typcategory => 'S', typinput => 'pg_node_tree_in', + typoutput => 'pg_node_tree_out', typreceive => 'pg_node_tree_recv', + typsend => 'pg_node_tree_send', typalign => 'i', typstorage => 'x', + typcollation => 'default' }, +{ oid => '3361', oid_symbol => 'PGNDISTINCTOID', + descr => 'multivariate ndistinct coefficients', + typname => 'pg_ndistinct', typlen => '-1', typbyval => 'f', + typcategory => 'S', typinput => 'pg_ndistinct_in', + typoutput => 'pg_ndistinct_out', typreceive => 'pg_ndistinct_recv', + 
typsend => 'pg_ndistinct_send', typalign => 'i', typstorage => 'x', + typcollation => 'default' }, +{ oid => '3402', oid_symbol => 'PGDEPENDENCIESOID', + descr => 'multivariate dependencies', + typname => 'pg_dependencies', typlen => '-1', typbyval => 'f', + typcategory => 'S', typinput => 'pg_dependencies_in', + typoutput => 'pg_dependencies_out', typreceive => 'pg_dependencies_recv', + typsend => 'pg_dependencies_send', typalign => 'i', typstorage => 'x', + typcollation => 'default' }, +{ oid => '5017', oid_symbol => 'PGMCVLISTOID', + descr => 'multivariate MCV list', + typname => 'pg_mcv_list', typlen => '-1', typbyval => 'f', typcategory => 'S', + typinput => 'pg_mcv_list_in', typoutput => 'pg_mcv_list_out', + typreceive => 'pg_mcv_list_recv', typsend => 'pg_mcv_list_send', + typalign => 'i', typstorage => 'x', typcollation => 'default' }, +{ oid => '32', oid_symbol => 'PGDDLCOMMANDOID', + descr => 'internal type for passing CollectedCommand', + typname => 'pg_ddl_command', typlen => 'SIZEOF_POINTER', typbyval => 't', + typtype => 'p', typcategory => 'P', typinput => 'pg_ddl_command_in', + typoutput => 'pg_ddl_command_out', typreceive => 'pg_ddl_command_recv', + typsend => 'pg_ddl_command_send', typalign => 'ALIGNOF_POINTER' }, + +# OIDS 600 - 699 + +{ oid => '600', array_type_oid => '1017', + descr => 'geometric point \'(x, y)\'', + typname => 'point', typlen => '16', typbyval => 'f', typcategory => 'G', + typelem => 'float8', typinput => 'point_in', typoutput => 'point_out', + typreceive => 'point_recv', typsend => 'point_send', typalign => 'd' }, +{ oid => '601', array_type_oid => '1018', + descr => 'geometric line segment \'(pt1,pt2)\'', + typname => 'lseg', typlen => '32', typbyval => 'f', typcategory => 'G', + typelem => 'point', typinput => 'lseg_in', typoutput => 'lseg_out', + typreceive => 'lseg_recv', typsend => 'lseg_send', typalign => 'd' }, +{ oid => '602', array_type_oid => '1019', + descr => 'geometric path \'(pt1,...)\'', + typname => 'path', typlen => '-1', typbyval => 'f', typcategory => 'G', + typinput => 'path_in', typoutput => 'path_out', typreceive => 'path_recv', + typsend => 'path_send', typalign => 'd', typstorage => 'x' }, +{ oid => '603', array_type_oid => '1020', + descr => 'geometric box \'(lower left,upper right)\'', + typname => 'box', typlen => '32', typbyval => 'f', typcategory => 'G', + typdelim => ';', typelem => 'point', typinput => 'box_in', + typoutput => 'box_out', typreceive => 'box_recv', typsend => 'box_send', + typalign => 'd' }, +{ oid => '604', array_type_oid => '1027', + descr => 'geometric polygon \'(pt1,...)\'', + typname => 'polygon', typlen => '-1', typbyval => 'f', typcategory => 'G', + typinput => 'poly_in', typoutput => 'poly_out', typreceive => 'poly_recv', + typsend => 'poly_send', typalign => 'd', typstorage => 'x' }, +{ oid => '628', array_type_oid => '629', descr => 'geometric line', + typname => 'line', typlen => '24', typbyval => 'f', typcategory => 'G', + typelem => 'float8', typinput => 'line_in', typoutput => 'line_out', + typreceive => 'line_recv', typsend => 'line_send', typalign => 'd' }, + +# OIDS 700 - 799 + +{ oid => '700', array_type_oid => '1021', + descr => 'single-precision floating point number, 4-byte storage', + typname => 'float4', typlen => '4', typbyval => 'FLOAT4PASSBYVAL', + typcategory => 'N', typinput => 'float4in', typoutput => 'float4out', + typreceive => 'float4recv', typsend => 'float4send', typalign => 'i' }, +{ oid => '701', array_type_oid => '1022', + descr => 'double-precision floating point 
number, 8-byte storage', + typname => 'float8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'N', typispreferred => 't', typinput => 'float8in', + typoutput => 'float8out', typreceive => 'float8recv', typsend => 'float8send', + typalign => 'd' }, +{ oid => '705', descr => 'pseudo-type representing an undetermined type', + typname => 'unknown', typlen => '-2', typbyval => 'f', typtype => 'p', + typcategory => 'X', typinput => 'unknownin', typoutput => 'unknownout', + typreceive => 'unknownrecv', typsend => 'unknownsend', typalign => 'c' }, +{ oid => '718', array_type_oid => '719', + descr => 'geometric circle \'(center,radius)\'', + typname => 'circle', typlen => '24', typbyval => 'f', typcategory => 'G', + typinput => 'circle_in', typoutput => 'circle_out', + typreceive => 'circle_recv', typsend => 'circle_send', typalign => 'd' }, +{ oid => '790', oid_symbol => 'CASHOID', array_type_oid => '791', + descr => 'monetary amounts, $d,ddd.cc', + typname => 'money', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'N', typinput => 'cash_in', typoutput => 'cash_out', + typreceive => 'cash_recv', typsend => 'cash_send', typalign => 'd' }, + +# OIDS 800 - 899 + +{ oid => '829', array_type_oid => '1040', + descr => 'XX:XX:XX:XX:XX:XX, MAC address', + typname => 'macaddr', typlen => '6', typbyval => 'f', typcategory => 'U', + typinput => 'macaddr_in', typoutput => 'macaddr_out', + typreceive => 'macaddr_recv', typsend => 'macaddr_send', typalign => 'i' }, +{ oid => '869', array_type_oid => '1041', + descr => 'IP address/netmask, host address, netmask optional', + typname => 'inet', typlen => '-1', typbyval => 'f', typcategory => 'I', + typispreferred => 't', typinput => 'inet_in', typoutput => 'inet_out', + typreceive => 'inet_recv', typsend => 'inet_send', typalign => 'i', + typstorage => 'm' }, +{ oid => '650', array_type_oid => '651', + descr => 'network IP address/netmask, network address', + typname => 'cidr', typlen => '-1', typbyval => 'f', typcategory => 'I', + typinput => 'cidr_in', typoutput => 'cidr_out', typreceive => 'cidr_recv', + typsend => 'cidr_send', typalign => 'i', typstorage => 'm' }, +{ oid => '774', array_type_oid => '775', + descr => 'XX:XX:XX:XX:XX:XX:XX:XX, MAC address', + typname => 'macaddr8', typlen => '8', typbyval => 'f', typcategory => 'U', + typinput => 'macaddr8_in', typoutput => 'macaddr8_out', + typreceive => 'macaddr8_recv', typsend => 'macaddr8_send', typalign => 'i' }, + +# OIDS 1000 - 1099 + +{ oid => '1033', array_type_oid => '1034', descr => 'access control list', + typname => 'aclitem', typlen => '12', typbyval => 'f', typcategory => 'U', + typinput => 'aclitemin', typoutput => 'aclitemout', typreceive => '-', + typsend => '-', typalign => 'i' }, +{ oid => '1042', array_type_oid => '1014', + descr => 'char(length), blank-padded string, fixed storage length', + typname => 'bpchar', typlen => '-1', typbyval => 'f', typcategory => 'S', + typinput => 'bpcharin', typoutput => 'bpcharout', typreceive => 'bpcharrecv', + typsend => 'bpcharsend', typmodin => 'bpchartypmodin', + typmodout => 'bpchartypmodout', typalign => 'i', typstorage => 'x', + typcollation => 'default' }, +{ oid => '1043', array_type_oid => '1015', + descr => 'varchar(length), non-blank-padded string, variable storage length', + typname => 'varchar', typlen => '-1', typbyval => 'f', typcategory => 'S', + typinput => 'varcharin', typoutput => 'varcharout', + typreceive => 'varcharrecv', typsend => 'varcharsend', + typmodin => 'varchartypmodin', typmodout => 
'varchartypmodout', + typalign => 'i', typstorage => 'x', typcollation => 'default' }, +{ oid => '1082', array_type_oid => '1182', descr => 'date', + typname => 'date', typlen => '4', typbyval => 't', typcategory => 'D', + typinput => 'date_in', typoutput => 'date_out', typreceive => 'date_recv', + typsend => 'date_send', typalign => 'i' }, +{ oid => '1083', array_type_oid => '1183', descr => 'time of day', + typname => 'time', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'D', typinput => 'time_in', typoutput => 'time_out', + typreceive => 'time_recv', typsend => 'time_send', typmodin => 'timetypmodin', + typmodout => 'timetypmodout', typalign => 'd' }, + +# OIDS 1100 - 1199 + +{ oid => '1114', array_type_oid => '1115', descr => 'date and time', + typname => 'timestamp', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'D', typinput => 'timestamp_in', typoutput => 'timestamp_out', + typreceive => 'timestamp_recv', typsend => 'timestamp_send', + typmodin => 'timestamptypmodin', typmodout => 'timestamptypmodout', + typalign => 'd' }, +{ oid => '1184', array_type_oid => '1185', + descr => 'date and time with time zone', + typname => 'timestamptz', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'D', typispreferred => 't', typinput => 'timestamptz_in', + typoutput => 'timestamptz_out', typreceive => 'timestamptz_recv', + typsend => 'timestamptz_send', typmodin => 'timestamptztypmodin', + typmodout => 'timestamptztypmodout', typalign => 'd' }, +{ oid => '1186', array_type_oid => '1187', + descr => '@ , time interval', + typname => 'interval', typlen => '16', typbyval => 'f', typcategory => 'T', + typispreferred => 't', typinput => 'interval_in', typoutput => 'interval_out', + typreceive => 'interval_recv', typsend => 'interval_send', + typmodin => 'intervaltypmodin', typmodout => 'intervaltypmodout', + typalign => 'd' }, + +# OIDS 1200 - 1299 + +{ oid => '1266', array_type_oid => '1270', + descr => 'time of day with time zone', + typname => 'timetz', typlen => '12', typbyval => 'f', typcategory => 'D', + typinput => 'timetz_in', typoutput => 'timetz_out', + typreceive => 'timetz_recv', typsend => 'timetz_send', + typmodin => 'timetztypmodin', typmodout => 'timetztypmodout', + typalign => 'd' }, + +# OIDS 1500 - 1599 + +{ oid => '1560', array_type_oid => '1561', descr => 'fixed-length bit string', + typname => 'bit', typlen => '-1', typbyval => 'f', typcategory => 'V', + typinput => 'bit_in', typoutput => 'bit_out', typreceive => 'bit_recv', + typsend => 'bit_send', typmodin => 'bittypmodin', typmodout => 'bittypmodout', + typalign => 'i', typstorage => 'x' }, +{ oid => '1562', array_type_oid => '1563', + descr => 'variable-length bit string', + typname => 'varbit', typlen => '-1', typbyval => 'f', typcategory => 'V', + typispreferred => 't', typinput => 'varbit_in', typoutput => 'varbit_out', + typreceive => 'varbit_recv', typsend => 'varbit_send', + typmodin => 'varbittypmodin', typmodout => 'varbittypmodout', typalign => 'i', + typstorage => 'x' }, + +# OIDS 1700 - 1799 + +{ oid => '1700', array_type_oid => '1231', + descr => 'numeric(precision, decimal), arbitrary precision number', + typname => 'numeric', typlen => '-1', typbyval => 'f', typcategory => 'N', + typinput => 'numeric_in', typoutput => 'numeric_out', + typreceive => 'numeric_recv', typsend => 'numeric_send', + typmodin => 'numerictypmodin', typmodout => 'numerictypmodout', + typalign => 'i', typstorage => 'm' }, + +{ oid => '1790', array_type_oid => '2201', + descr => 'reference to 
cursor (portal name)', + typname => 'refcursor', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'textin', typoutput => 'textout', typreceive => 'textrecv', + typsend => 'textsend', typalign => 'i', typstorage => 'x' }, + +# OIDS 2200 - 2299 + +{ oid => '2202', array_type_oid => '2207', + descr => 'registered procedure (with args)', + typname => 'regprocedure', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regprocedurein', typoutput => 'regprocedureout', + typreceive => 'regprocedurerecv', typsend => 'regproceduresend', + typalign => 'i' }, +{ oid => '2203', array_type_oid => '2208', descr => 'registered operator', + typname => 'regoper', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regoperin', typoutput => 'regoperout', + typreceive => 'regoperrecv', typsend => 'regopersend', typalign => 'i' }, +{ oid => '2204', array_type_oid => '2209', + descr => 'registered operator (with args)', + typname => 'regoperator', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regoperatorin', typoutput => 'regoperatorout', + typreceive => 'regoperatorrecv', typsend => 'regoperatorsend', + typalign => 'i' }, +{ oid => '2205', array_type_oid => '2210', descr => 'registered class', + typname => 'regclass', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regclassin', typoutput => 'regclassout', + typreceive => 'regclassrecv', typsend => 'regclasssend', typalign => 'i' }, +{ oid => '2206', array_type_oid => '2211', descr => 'registered type', + typname => 'regtype', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regtypein', typoutput => 'regtypeout', + typreceive => 'regtyperecv', typsend => 'regtypesend', typalign => 'i' }, +{ oid => '4096', array_type_oid => '4097', descr => 'registered role', + typname => 'regrole', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regrolein', typoutput => 'regroleout', + typreceive => 'regrolerecv', typsend => 'regrolesend', typalign => 'i' }, +{ oid => '4089', array_type_oid => '4090', descr => 'registered namespace', + typname => 'regnamespace', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regnamespacein', typoutput => 'regnamespaceout', + typreceive => 'regnamespacerecv', typsend => 'regnamespacesend', + typalign => 'i' }, + +# uuid +{ oid => '2950', array_type_oid => '2951', descr => 'UUID datatype', + typname => 'uuid', typlen => '16', typbyval => 'f', typcategory => 'U', + typinput => 'uuid_in', typoutput => 'uuid_out', typreceive => 'uuid_recv', + typsend => 'uuid_send', typalign => 'c' }, + +# pg_lsn +{ oid => '3220', oid_symbol => 'LSNOID', array_type_oid => '3221', + descr => 'PostgreSQL LSN datatype', + typname => 'pg_lsn', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'U', typinput => 'pg_lsn_in', typoutput => 'pg_lsn_out', + typreceive => 'pg_lsn_recv', typsend => 'pg_lsn_send', typalign => 'd' }, + +# text search +{ oid => '3614', array_type_oid => '3643', + descr => 'text representation for text search', + typname => 'tsvector', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'tsvectorin', typoutput => 'tsvectorout', + typreceive => 'tsvectorrecv', typsend => 'tsvectorsend', + typanalyze => 'ts_typanalyze', typalign => 'i', typstorage => 'x' }, +{ oid => '3642', array_type_oid => '3644', + descr => 'GiST index internal text representation for text search', + typname => 'gtsvector', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'gtsvectorin', 
typoutput => 'gtsvectorout', typreceive => '-', + typsend => '-', typalign => 'i' }, +{ oid => '3615', array_type_oid => '3645', + descr => 'query representation for text search', + typname => 'tsquery', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'tsqueryin', typoutput => 'tsqueryout', + typreceive => 'tsqueryrecv', typsend => 'tsquerysend', typalign => 'i' }, +{ oid => '3734', array_type_oid => '3735', + descr => 'registered text search configuration', + typname => 'regconfig', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regconfigin', typoutput => 'regconfigout', + typreceive => 'regconfigrecv', typsend => 'regconfigsend', typalign => 'i' }, +{ oid => '3769', array_type_oid => '3770', + descr => 'registered text search dictionary', + typname => 'regdictionary', typlen => '4', typbyval => 't', + typcategory => 'N', typinput => 'regdictionaryin', + typoutput => 'regdictionaryout', typreceive => 'regdictionaryrecv', + typsend => 'regdictionarysend', typalign => 'i' }, + +# jsonb +{ oid => '3802', array_type_oid => '3807', descr => 'Binary JSON', + typname => 'jsonb', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'jsonb_in', typoutput => 'jsonb_out', typreceive => 'jsonb_recv', + typsend => 'jsonb_send', typalign => 'i', typstorage => 'x' }, +{ oid => '4072', array_type_oid => '4073', descr => 'JSON path', + typname => 'jsonpath', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'jsonpath_in', typoutput => 'jsonpath_out', + typreceive => 'jsonpath_recv', typsend => 'jsonpath_send', typalign => 'i', + typstorage => 'x' }, + +{ oid => '2970', array_type_oid => '2949', descr => 'txid snapshot', + typname => 'txid_snapshot', typlen => '-1', typbyval => 'f', + typcategory => 'U', typinput => 'txid_snapshot_in', + typoutput => 'txid_snapshot_out', typreceive => 'txid_snapshot_recv', + typsend => 'txid_snapshot_send', typalign => 'd', typstorage => 'x' }, + +# range types +{ oid => '3904', array_type_oid => '3905', descr => 'range of integers', + typname => 'int4range', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'i', typstorage => 'x' }, +{ oid => '3906', array_type_oid => '3907', descr => 'range of numerics', + typname => 'numrange', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'i', typstorage => 'x' }, +{ oid => '3908', array_type_oid => '3909', + descr => 'range of timestamps without time zone', + typname => 'tsrange', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'd', typstorage => 'x' }, +{ oid => '3910', array_type_oid => '3911', + descr => 'range of timestamps with time zone', + typname => 'tstzrange', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'd', typstorage => 'x' }, +{ oid => '3912', array_type_oid => '3913', descr => 'range of dates', + typname => 'daterange', typlen => '-1', typbyval => 'f', 
typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'i', typstorage => 'x' }, +{ oid => '3926', array_type_oid => '3927', descr => 'range of bigints', + typname => 'int8range', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'd', typstorage => 'x' }, + +# pseudo-types +# types with typtype='p' represent various special cases in the type system. +# These cannot be used to define table columns, but are valid as function +# argument and result types (if supported by the function's implementation +# language). +# Note: cstring is a borderline case; it is still considered a pseudo-type, +# but there is now support for it in records and arrays. Perhaps we should +# just treat it as a regular base type? + +{ oid => '2249', descr => 'pseudo-type representing any composite type', + typname => 'record', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typarray => '_record', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, +# Arrays of records have typcategory P, so they can't be autogenerated. +{ oid => '2287', + typname => '_record', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typelem => 'record', typinput => 'array_in', + typoutput => 'array_out', typreceive => 'array_recv', typsend => 'array_send', + typanalyze => 'array_typanalyze', typalign => 'd', typstorage => 'x' }, +{ oid => '2275', array_type_oid => '1263', descr => 'C-style string', + typname => 'cstring', typlen => '-2', typbyval => 'f', typtype => 'p', + typcategory => 'P', typinput => 'cstring_in', typoutput => 'cstring_out', + typreceive => 'cstring_recv', typsend => 'cstring_send', typalign => 'c' }, +{ oid => '2276', descr => 'pseudo-type representing any type', + typname => 'any', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'any_in', typoutput => 'any_out', + typreceive => '-', typsend => '-', typalign => 'i' }, +{ oid => '2277', descr => 'pseudo-type representing a polymorphic array type', + typname => 'anyarray', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typinput => 'anyarray_in', typoutput => 'anyarray_out', + typreceive => 'anyarray_recv', typsend => 'anyarray_send', typalign => 'd', + typstorage => 'x' }, +{ oid => '2278', + descr => 'pseudo-type for the result of a function with no real result', + typname => 'void', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'void_in', typoutput => 'void_out', + typreceive => 'void_recv', typsend => 'void_send', typalign => 'i' }, +{ oid => '2279', descr => 'pseudo-type for the result of a trigger function', + typname => 'trigger', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'trigger_in', typoutput => 'trigger_out', + typreceive => '-', typsend => '-', typalign => 'i' }, +{ oid => '3838', oid_symbol => 'EVTTRIGGEROID', + descr => 'pseudo-type for the result of an event trigger function', + typname => 'event_trigger', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'event_trigger_in', + typoutput => 'event_trigger_out', typreceive => '-', typsend => '-', + 
typalign => 'i' }, +{ oid => '2280', + descr => 'pseudo-type for the result of a language handler function', + typname => 'language_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'language_handler_in', + typoutput => 'language_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '2281', + descr => 'pseudo-type representing an internal data structure', + typname => 'internal', typlen => 'SIZEOF_POINTER', typbyval => 't', + typtype => 'p', typcategory => 'P', typinput => 'internal_in', + typoutput => 'internal_out', typreceive => '-', typsend => '-', + typalign => 'ALIGNOF_POINTER' }, +{ oid => '2282', descr => 'obsolete, deprecated pseudo-type', + typname => 'opaque', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'opaque_in', typoutput => 'opaque_out', + typreceive => '-', typsend => '-', typalign => 'i' }, +{ oid => '2283', descr => 'pseudo-type representing a polymorphic base type', + typname => 'anyelement', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'anyelement_in', + typoutput => 'anyelement_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '2776', + descr => 'pseudo-type representing a polymorphic base type that is not an array', + typname => 'anynonarray', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'anynonarray_in', + typoutput => 'anynonarray_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '3500', + descr => 'pseudo-type representing a polymorphic base type that is an enum', + typname => 'anyenum', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'anyenum_in', typoutput => 'anyenum_out', + typreceive => '-', typsend => '-', typalign => 'i' }, +{ oid => '3115', + descr => 'pseudo-type for the result of an FDW handler function', + typname => 'fdw_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'fdw_handler_in', + typoutput => 'fdw_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '325', + descr => 'pseudo-type for the result of an index AM handler function', + typname => 'index_am_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'index_am_handler_in', + typoutput => 'index_am_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '3310', + descr => 'pseudo-type for the result of a tablesample method function', + typname => 'tsm_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'tsm_handler_in', + typoutput => 'tsm_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '269', + typname => 'table_am_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'table_am_handler_in', + typoutput => 'table_am_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '3831', + descr => 'pseudo-type representing a polymorphic base type that is a range', + typname => 'anyrange', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typinput => 'anyrange_in', typoutput => 'anyrange_out', + typreceive => '-', typsend => '-', typalign => 'd', typstorage => 'x' }, + +] diff --git a/codegen/src/pg_type.h b/codegen/src/pg_type.h deleted file mode 100644 index 345e91640..000000000 --- a/codegen/src/pg_type.h +++ /dev/null @@ -1,752 +0,0 @@ 
-/*------------------------------------------------------------------------- - * - * pg_type.h - * definition of the system "type" relation (pg_type) - * along with the relation's initial contents. - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_type.h - * - * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - *------------------------------------------------------------------------- - */ -#ifndef PG_TYPE_H -#define PG_TYPE_H - -#include "catalog/genbki.h" - -/* ---------------- - * pg_type definition. cpp turns this into - * typedef struct FormData_pg_type - * - * Some of the values in a pg_type instance are copied into - * pg_attribute instances. Some parts of Postgres use the pg_type copy, - * while others use the pg_attribute copy, so they must match. - * See struct FormData_pg_attribute for details. - * ---------------- - */ -#define TypeRelationId 1247 -#define TypeRelation_Rowtype_Id 71 - -CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO -{ - NameData typname; /* type name */ - Oid typnamespace; /* OID of namespace containing this type */ - Oid typowner; /* type owner */ - - /* - * For a fixed-size type, typlen is the number of bytes we use to - * represent a value of this type, e.g. 4 for an int4. But for a - * variable-length type, typlen is negative. We use -1 to indicate a - * "varlena" type (one that has a length word), -2 to indicate a - * null-terminated C string. - */ - int16 typlen; - - /* - * typbyval determines whether internal Postgres routines pass a value of - * this type by value or by reference. typbyval had better be FALSE if - * the length is not 1, 2, or 4 (or 8 on 8-byte-Datum machines). - * Variable-length types are always passed by reference. Note that - * typbyval can be false even if the length would allow pass-by-value; - * this is currently true for type float4, for example. - */ - bool typbyval; - - /* - * typtype is 'b' for a base type, 'c' for a composite type (e.g., a - * table's rowtype), 'd' for a domain, 'e' for an enum type, 'p' for a - * pseudo-type, or 'r' for a range type. (Use the TYPTYPE macros below.) - * - * If typtype is 'c', typrelid is the OID of the class' entry in pg_class. - */ - char typtype; - - /* - * typcategory and typispreferred help the parser distinguish preferred - * and non-preferred coercions. The category can be any single ASCII - * character (but not \0). The categories used for built-in types are - * identified by the TYPCATEGORY macros below. - */ - char typcategory; /* arbitrary type classification */ - - bool typispreferred; /* is type "preferred" within its category? */ - - /* - * If typisdefined is false, the entry is only a placeholder (forward - * reference). We know the type name, but not yet anything else about it. - */ - bool typisdefined; - - char typdelim; /* delimiter for arrays of this type */ - - Oid typrelid; /* 0 if not a composite type */ - - /* - * If typelem is not 0 then it identifies another row in pg_type. The - * current type can then be subscripted like an array yielding values of - * type typelem. A non-zero typelem does not guarantee this type to be a - * "real" array type; some ordinary fixed-length types can also be - * subscripted (e.g., name, point). Variable-length types can *not* be - * turned into pseudo-arrays like that. 
Hence, the way to determine - * whether a type is a "true" array type is if: - * - * typelem != 0 and typlen == -1. - */ - Oid typelem; - - /* - * If there is a "true" array type having this type as element type, - * typarray links to it. Zero if no associated "true" array type. - */ - Oid typarray; - - /* - * I/O conversion procedures for the datatype. - */ - regproc typinput; /* text format (required) */ - regproc typoutput; - regproc typreceive; /* binary format (optional) */ - regproc typsend; - - /* - * I/O functions for optional type modifiers. - */ - regproc typmodin; - regproc typmodout; - - /* - * Custom ANALYZE procedure for the datatype (0 selects the default). - */ - regproc typanalyze; - - /* ---------------- - * typalign is the alignment required when storing a value of this - * type. It applies to storage on disk as well as most - * representations of the value inside Postgres. When multiple values - * are stored consecutively, such as in the representation of a - * complete row on disk, padding is inserted before a datum of this - * type so that it begins on the specified boundary. The alignment - * reference is the beginning of the first datum in the sequence. - * - * 'c' = CHAR alignment, ie no alignment needed. - * 's' = SHORT alignment (2 bytes on most machines). - * 'i' = INT alignment (4 bytes on most machines). - * 'd' = DOUBLE alignment (8 bytes on many machines, but by no means all). - * - * See include/access/tupmacs.h for the macros that compute these - * alignment requirements. Note also that we allow the nominal alignment - * to be violated when storing "packed" varlenas; the TOAST mechanism - * takes care of hiding that from most code. - * - * NOTE: for types used in system tables, it is critical that the - * size and alignment defined in pg_type agree with the way that the - * compiler will lay out the field in a struct representing a table row. - * ---------------- - */ - char typalign; - - /* ---------------- - * typstorage tells if the type is prepared for toasting and what - * the default strategy for attributes of this type should be. - * - * 'p' PLAIN type not prepared for toasting - * 'e' EXTERNAL external storage possible, don't try to compress - * 'x' EXTENDED try to compress and store external if required - * 'm' MAIN like 'x' but try to keep in main tuple - * ---------------- - */ - char typstorage; - - /* - * This flag represents a "NOT NULL" constraint against this datatype. - * - * If true, the attnotnull column for a corresponding table column using - * this datatype will always enforce the NOT NULL constraint. - * - * Used primarily for domain types. - */ - bool typnotnull; - - /* - * Domains use typbasetype to show the base (or domain) type that the - * domain is based on. Zero if the type is not a domain. - */ - Oid typbasetype; - - /* - * Domains use typtypmod to record the typmod to be applied to their base - * type (-1 if base type does not use a typmod). -1 if this type is not a - * domain. - */ - int32 typtypmod; - - /* - * typndims is the declared number of dimensions for an array domain type - * (i.e., typbasetype is an array type). Otherwise zero. - */ - int32 typndims; - - /* - * Collation: 0 if type cannot use collations, DEFAULT_COLLATION_OID for - * collatable base types, possibly other OID for domains - */ - Oid typcollation; - -#ifdef CATALOG_VARLEN /* variable-length fields start here */ - - /* - * If typdefaultbin is not NULL, it is the nodeToString representation of - * a default expression for the type. 
Currently this is only used for - * domains. - */ - pg_node_tree typdefaultbin; - - /* - * typdefault is NULL if the type has no associated default value. If - * typdefaultbin is not NULL, typdefault must contain a human-readable - * version of the default expression represented by typdefaultbin. If - * typdefaultbin is NULL and typdefault is not, then typdefault is the - * external representation of the type's default value, which may be fed - * to the type's input converter to produce a constant. - */ - text typdefault; - - /* - * Access permissions - */ - aclitem typacl[1]; -#endif -} FormData_pg_type; - -/* ---------------- - * Form_pg_type corresponds to a pointer to a row with - * the format of pg_type relation. - * ---------------- - */ -typedef FormData_pg_type *Form_pg_type; - -/* ---------------- - * compiler constants for pg_type - * ---------------- - */ -#define Natts_pg_type 30 -#define Anum_pg_type_typname 1 -#define Anum_pg_type_typnamespace 2 -#define Anum_pg_type_typowner 3 -#define Anum_pg_type_typlen 4 -#define Anum_pg_type_typbyval 5 -#define Anum_pg_type_typtype 6 -#define Anum_pg_type_typcategory 7 -#define Anum_pg_type_typispreferred 8 -#define Anum_pg_type_typisdefined 9 -#define Anum_pg_type_typdelim 10 -#define Anum_pg_type_typrelid 11 -#define Anum_pg_type_typelem 12 -#define Anum_pg_type_typarray 13 -#define Anum_pg_type_typinput 14 -#define Anum_pg_type_typoutput 15 -#define Anum_pg_type_typreceive 16 -#define Anum_pg_type_typsend 17 -#define Anum_pg_type_typmodin 18 -#define Anum_pg_type_typmodout 19 -#define Anum_pg_type_typanalyze 20 -#define Anum_pg_type_typalign 21 -#define Anum_pg_type_typstorage 22 -#define Anum_pg_type_typnotnull 23 -#define Anum_pg_type_typbasetype 24 -#define Anum_pg_type_typtypmod 25 -#define Anum_pg_type_typndims 26 -#define Anum_pg_type_typcollation 27 -#define Anum_pg_type_typdefaultbin 28 -#define Anum_pg_type_typdefault 29 -#define Anum_pg_type_typacl 30 - - -/* ---------------- - * initial contents of pg_type - * ---------------- - */ - -/* - * Keep the following ordered by OID so that later changes can be made more - * easily. - * - * For types used in the system catalogs, make sure the values here match - * TypInfo[] in bootstrap.c. 
- */ - -/* OIDS 1 - 99 */ -DATA(insert OID = 16 ( bool PGNSP PGUID 1 t b B t t \054 0 0 1000 boolin boolout boolrecv boolsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("boolean, 'true'/'false'"); -#define BOOLOID 16 - -DATA(insert OID = 17 ( bytea PGNSP PGUID -1 f b U f t \054 0 0 1001 byteain byteaout bytearecv byteasend - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("variable-length string, binary values escaped"); -#define BYTEAOID 17 - -DATA(insert OID = 18 ( char PGNSP PGUID 1 t b S f t \054 0 0 1002 charin charout charrecv charsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("single character"); -#define CHAROID 18 - -DATA(insert OID = 19 ( name PGNSP PGUID NAMEDATALEN f b S f t \054 0 18 1003 namein nameout namerecv namesend - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("63-byte type for storing system identifiers"); -#define NAMEOID 19 - -DATA(insert OID = 20 ( int8 PGNSP PGUID 8 FLOAT8PASSBYVAL b N f t \054 0 0 1016 int8in int8out int8recv int8send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("~18 digit integer, 8-byte storage"); -#define INT8OID 20 - -DATA(insert OID = 21 ( int2 PGNSP PGUID 2 t b N f t \054 0 0 1005 int2in int2out int2recv int2send - - - s p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("-32 thousand to 32 thousand, 2-byte storage"); -#define INT2OID 21 - -DATA(insert OID = 22 ( int2vector PGNSP PGUID -1 f b A f t \054 0 21 1006 int2vectorin int2vectorout int2vectorrecv int2vectorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("array of int2, used in system tables"); -#define INT2VECTOROID 22 - -DATA(insert OID = 23 ( int4 PGNSP PGUID 4 t b N f t \054 0 0 1007 int4in int4out int4recv int4send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("-2 billion to 2 billion integer, 4-byte storage"); -#define INT4OID 23 - -DATA(insert OID = 24 ( regproc PGNSP PGUID 4 t b N f t \054 0 0 1008 regprocin regprocout regprocrecv regprocsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered procedure"); -#define REGPROCOID 24 - -DATA(insert OID = 25 ( text PGNSP PGUID -1 f b S t t \054 0 0 1009 textin textout textrecv textsend - - - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("variable-length string, no limit specified"); -#define TEXTOID 25 - -DATA(insert OID = 26 ( oid PGNSP PGUID 4 t b N t t \054 0 0 1028 oidin oidout oidrecv oidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("object identifier(oid), maximum 4 billion"); -#define OIDOID 26 - -DATA(insert OID = 27 ( tid PGNSP PGUID 6 f b U f t \054 0 0 1010 tidin tidout tidrecv tidsend - - - s p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("(block, offset), physical location of tuple"); -#define TIDOID 27 - -DATA(insert OID = 28 ( xid PGNSP PGUID 4 t b U f t \054 0 0 1011 xidin xidout xidrecv xidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("transaction id"); -#define XIDOID 28 - -DATA(insert OID = 29 ( cid PGNSP PGUID 4 t b U f t \054 0 0 1012 cidin cidout cidrecv cidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("command identifier type, sequence in transaction id"); -#define CIDOID 29 - -DATA(insert OID = 30 ( oidvector PGNSP PGUID -1 f b A f t \054 0 26 1013 oidvectorin oidvectorout oidvectorrecv oidvectorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("array of oids, used in system tables"); -#define OIDVECTOROID 30 - -/* hand-built rowtype entries for bootstrapped catalogs */ -/* NB: OIDs assigned here must match the BKI_ROWTYPE_OID declarations */ - -DATA(insert OID = 71 ( pg_type PGNSP PGUID -1 f c C f t 
\054 1247 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 75 ( pg_attribute PGNSP PGUID -1 f c C f t \054 1249 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 81 ( pg_proc PGNSP PGUID -1 f c C f t \054 1255 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 83 ( pg_class PGNSP PGUID -1 f c C f t \054 1259 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 100 - 199 */ -DATA(insert OID = 114 ( json PGNSP PGUID -1 f b U f t \054 0 0 199 json_in json_out json_recv json_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define JSONOID 114 -DATA(insert OID = 142 ( xml PGNSP PGUID -1 f b U f t \054 0 0 143 xml_in xml_out xml_recv xml_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("XML content"); -#define XMLOID 142 -DATA(insert OID = 143 ( _xml PGNSP PGUID -1 f b A f t \054 0 142 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 199 ( _json PGNSP PGUID -1 f b A f t \054 0 114 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -DATA(insert OID = 194 ( pg_node_tree PGNSP PGUID -1 f b S f t \054 0 0 0 pg_node_tree_in pg_node_tree_out pg_node_tree_recv pg_node_tree_send - - - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("string representing an internal node tree"); -#define PGNODETREEOID 194 - -DATA(insert OID = 3361 ( pg_ndistinct PGNSP PGUID -1 f b S f t \054 0 0 0 pg_ndistinct_in pg_ndistinct_out pg_ndistinct_recv pg_ndistinct_send - - - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("multivariate ndistinct coefficients"); -#define PGNDISTINCTOID 3361 - -DATA(insert OID = 3402 ( pg_dependencies PGNSP PGUID -1 f b S f t \054 0 0 0 pg_dependencies_in pg_dependencies_out pg_dependencies_recv pg_dependencies_send - - - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("multivariate dependencies"); -#define PGDEPENDENCIESOID 3402 - -DATA(insert OID = 32 ( pg_ddl_command PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 pg_ddl_command_in pg_ddl_command_out pg_ddl_command_recv pg_ddl_command_send - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("internal type for passing CollectedCommand"); -#define PGDDLCOMMANDOID 32 - -/* OIDS 200 - 299 */ - -DATA(insert OID = 210 ( smgr PGNSP PGUID 2 t b U f t \054 0 0 0 smgrin smgrout - - - - - s p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("storage manager"); - -/* OIDS 300 - 399 */ - -/* OIDS 400 - 499 */ - -/* OIDS 500 - 599 */ - -/* OIDS 600 - 699 */ -DATA(insert OID = 600 ( point PGNSP PGUID 16 f b G f t \054 0 701 1017 point_in point_out point_recv point_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric point '(x, y)'"); -#define POINTOID 600 -DATA(insert OID = 601 ( lseg PGNSP PGUID 32 f b G f t \054 0 600 1018 lseg_in lseg_out lseg_recv lseg_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric line segment '(pt1,pt2)'"); -#define LSEGOID 601 -DATA(insert OID = 602 ( path PGNSP PGUID -1 f b G f t \054 0 0 1019 path_in path_out path_recv path_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric path '(pt1,...)'"); -#define PATHOID 602 -DATA(insert OID = 603 ( box PGNSP PGUID 32 f b G f t \073 0 600 1020 box_in box_out box_recv box_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric box 
'(lower left,upper right)'"); -#define BOXOID 603 -DATA(insert OID = 604 ( polygon PGNSP PGUID -1 f b G f t \054 0 0 1027 poly_in poly_out poly_recv poly_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric polygon '(pt1,...)'"); -#define POLYGONOID 604 - -DATA(insert OID = 628 ( line PGNSP PGUID 24 f b G f t \054 0 701 629 line_in line_out line_recv line_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric line"); -#define LINEOID 628 -DATA(insert OID = 629 ( _line PGNSP PGUID -1 f b A f t \054 0 628 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 700 - 799 */ - -DATA(insert OID = 700 ( float4 PGNSP PGUID 4 FLOAT4PASSBYVAL b N f t \054 0 0 1021 float4in float4out float4recv float4send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("single-precision floating point number, 4-byte storage"); -#define FLOAT4OID 700 -DATA(insert OID = 701 ( float8 PGNSP PGUID 8 FLOAT8PASSBYVAL b N t t \054 0 0 1022 float8in float8out float8recv float8send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("double-precision floating point number, 8-byte storage"); -#define FLOAT8OID 701 -DATA(insert OID = 702 ( abstime PGNSP PGUID 4 t b D f t \054 0 0 1023 abstimein abstimeout abstimerecv abstimesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("absolute, limited-range date and time (Unix system time)"); -#define ABSTIMEOID 702 -DATA(insert OID = 703 ( reltime PGNSP PGUID 4 t b T f t \054 0 0 1024 reltimein reltimeout reltimerecv reltimesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("relative, limited-range time interval (Unix delta time)"); -#define RELTIMEOID 703 -DATA(insert OID = 704 ( tinterval PGNSP PGUID 12 f b T f t \054 0 0 1025 tintervalin tintervalout tintervalrecv tintervalsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("(abstime,abstime), time interval"); -#define TINTERVALOID 704 -DATA(insert OID = 705 ( unknown PGNSP PGUID -2 f p X f t \054 0 0 0 unknownin unknownout unknownrecv unknownsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR(""); -#define UNKNOWNOID 705 - -DATA(insert OID = 718 ( circle PGNSP PGUID 24 f b G f t \054 0 0 719 circle_in circle_out circle_recv circle_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric circle '(center,radius)'"); -#define CIRCLEOID 718 -DATA(insert OID = 719 ( _circle PGNSP PGUID -1 f b A f t \054 0 718 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 790 ( money PGNSP PGUID 8 FLOAT8PASSBYVAL b N f t \054 0 0 791 cash_in cash_out cash_recv cash_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("monetary amounts, $d,ddd.cc"); -#define CASHOID 790 -DATA(insert OID = 791 ( _money PGNSP PGUID -1 f b A f t \054 0 790 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 800 - 899 */ -DATA(insert OID = 829 ( macaddr PGNSP PGUID 6 f b U f t \054 0 0 1040 macaddr_in macaddr_out macaddr_recv macaddr_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("XX:XX:XX:XX:XX:XX, MAC address"); -#define MACADDROID 829 -DATA(insert OID = 869 ( inet PGNSP PGUID -1 f b I t t \054 0 0 1041 inet_in inet_out inet_recv inet_send - - - i m f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("IP address/netmask, host address, netmask optional"); -#define INETOID 869 -DATA(insert OID = 650 ( cidr PGNSP PGUID -1 f b I f t \054 0 0 651 cidr_in cidr_out cidr_recv cidr_send - - - i m f 0 -1 0 0 
_null_ _null_ _null_ )); -DESCR("network IP address/netmask, network address"); -#define CIDROID 650 -DATA(insert OID = 774 ( macaddr8 PGNSP PGUID 8 f b U f t \054 0 0 775 macaddr8_in macaddr8_out macaddr8_recv macaddr8_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("XX:XX:XX:XX:XX:XX:XX:XX, MAC address"); -#define MACADDR8OID 774 - -/* OIDS 900 - 999 */ - -/* OIDS 1000 - 1099 */ -DATA(insert OID = 1000 ( _bool PGNSP PGUID -1 f b A f t \054 0 16 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1001 ( _bytea PGNSP PGUID -1 f b A f t \054 0 17 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1002 ( _char PGNSP PGUID -1 f b A f t \054 0 18 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1003 ( _name PGNSP PGUID -1 f b A f t \054 0 19 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1005 ( _int2 PGNSP PGUID -1 f b A f t \054 0 21 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define INT2ARRAYOID 1005 -DATA(insert OID = 1006 ( _int2vector PGNSP PGUID -1 f b A f t \054 0 22 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1007 ( _int4 PGNSP PGUID -1 f b A f t \054 0 23 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define INT4ARRAYOID 1007 -DATA(insert OID = 1008 ( _regproc PGNSP PGUID -1 f b A f t \054 0 24 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1009 ( _text PGNSP PGUID -1 f b A f t \054 0 25 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ )); -#define TEXTARRAYOID 1009 -DATA(insert OID = 1028 ( _oid PGNSP PGUID -1 f b A f t \054 0 26 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define OIDARRAYOID 1028 -DATA(insert OID = 1010 ( _tid PGNSP PGUID -1 f b A f t \054 0 27 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1011 ( _xid PGNSP PGUID -1 f b A f t \054 0 28 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1012 ( _cid PGNSP PGUID -1 f b A f t \054 0 29 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1013 ( _oidvector PGNSP PGUID -1 f b A f t \054 0 30 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1014 ( _bpchar PGNSP PGUID -1 f b A f t \054 0 1042 0 array_in array_out array_recv array_send bpchartypmodin bpchartypmodout array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ )); -DATA(insert OID = 1015 ( _varchar PGNSP PGUID -1 f b A f t \054 0 1043 0 array_in array_out array_recv array_send varchartypmodin varchartypmodout array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ )); -DATA(insert OID = 1016 ( _int8 PGNSP PGUID -1 f b A f t \054 0 20 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1017 ( _point PGNSP PGUID -1 f b A f t \054 0 
600 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1018 ( _lseg PGNSP PGUID -1 f b A f t \054 0 601 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1019 ( _path PGNSP PGUID -1 f b A f t \054 0 602 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1020 ( _box PGNSP PGUID -1 f b A f t \073 0 603 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1021 ( _float4 PGNSP PGUID -1 f b A f t \054 0 700 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define FLOAT4ARRAYOID 1021 -DATA(insert OID = 1022 ( _float8 PGNSP PGUID -1 f b A f t \054 0 701 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1023 ( _abstime PGNSP PGUID -1 f b A f t \054 0 702 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1024 ( _reltime PGNSP PGUID -1 f b A f t \054 0 703 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1025 ( _tinterval PGNSP PGUID -1 f b A f t \054 0 704 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1027 ( _polygon PGNSP PGUID -1 f b A f t \054 0 604 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1033 ( aclitem PGNSP PGUID 12 f b U f t \054 0 0 1034 aclitemin aclitemout - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("access control list"); -#define ACLITEMOID 1033 -DATA(insert OID = 1034 ( _aclitem PGNSP PGUID -1 f b A f t \054 0 1033 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1040 ( _macaddr PGNSP PGUID -1 f b A f t \054 0 829 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 775 ( _macaddr8 PGNSP PGUID -1 f b A f t \054 0 774 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1041 ( _inet PGNSP PGUID -1 f b A f t \054 0 869 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 651 ( _cidr PGNSP PGUID -1 f b A f t \054 0 650 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1263 ( _cstring PGNSP PGUID -1 f b A f t \054 0 2275 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define CSTRINGARRAYOID 1263 - -DATA(insert OID = 1042 ( bpchar PGNSP PGUID -1 f b S f t \054 0 0 1014 bpcharin bpcharout bpcharrecv bpcharsend bpchartypmodin bpchartypmodout - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("char(length), blank-padded string, fixed storage length"); -#define BPCHAROID 1042 -DATA(insert OID = 1043 ( varchar PGNSP PGUID -1 f b S f t \054 0 0 1015 varcharin varcharout varcharrecv varcharsend varchartypmodin varchartypmodout - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("varchar(length), non-blank-padded string, variable storage length"); -#define VARCHAROID 1043 
- -DATA(insert OID = 1082 ( date PGNSP PGUID 4 t b D f t \054 0 0 1182 date_in date_out date_recv date_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("date"); -#define DATEOID 1082 -DATA(insert OID = 1083 ( time PGNSP PGUID 8 FLOAT8PASSBYVAL b D f t \054 0 0 1183 time_in time_out time_recv time_send timetypmodin timetypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("time of day"); -#define TIMEOID 1083 - -/* OIDS 1100 - 1199 */ -DATA(insert OID = 1114 ( timestamp PGNSP PGUID 8 FLOAT8PASSBYVAL b D f t \054 0 0 1115 timestamp_in timestamp_out timestamp_recv timestamp_send timestamptypmodin timestamptypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("date and time"); -#define TIMESTAMPOID 1114 -DATA(insert OID = 1115 ( _timestamp PGNSP PGUID -1 f b A f t \054 0 1114 0 array_in array_out array_recv array_send timestamptypmodin timestamptypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1182 ( _date PGNSP PGUID -1 f b A f t \054 0 1082 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1183 ( _time PGNSP PGUID -1 f b A f t \054 0 1083 0 array_in array_out array_recv array_send timetypmodin timetypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1184 ( timestamptz PGNSP PGUID 8 FLOAT8PASSBYVAL b D t t \054 0 0 1185 timestamptz_in timestamptz_out timestamptz_recv timestamptz_send timestamptztypmodin timestamptztypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("date and time with time zone"); -#define TIMESTAMPTZOID 1184 -DATA(insert OID = 1185 ( _timestamptz PGNSP PGUID -1 f b A f t \054 0 1184 0 array_in array_out array_recv array_send timestamptztypmodin timestamptztypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1186 ( interval PGNSP PGUID 16 f b T t t \054 0 0 1187 interval_in interval_out interval_recv interval_send intervaltypmodin intervaltypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("@ , time interval"); -#define INTERVALOID 1186 -DATA(insert OID = 1187 ( _interval PGNSP PGUID -1 f b A f t \054 0 1186 0 array_in array_out array_recv array_send intervaltypmodin intervaltypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 1200 - 1299 */ -DATA(insert OID = 1231 ( _numeric PGNSP PGUID -1 f b A f t \054 0 1700 0 array_in array_out array_recv array_send numerictypmodin numerictypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1266 ( timetz PGNSP PGUID 12 f b D f t \054 0 0 1270 timetz_in timetz_out timetz_recv timetz_send timetztypmodin timetztypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("time of day with time zone"); -#define TIMETZOID 1266 -DATA(insert OID = 1270 ( _timetz PGNSP PGUID -1 f b A f t \054 0 1266 0 array_in array_out array_recv array_send timetztypmodin timetztypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 1500 - 1599 */ -DATA(insert OID = 1560 ( bit PGNSP PGUID -1 f b V f t \054 0 0 1561 bit_in bit_out bit_recv bit_send bittypmodin bittypmodout - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("fixed-length bit string"); -#define BITOID 1560 -DATA(insert OID = 1561 ( _bit PGNSP PGUID -1 f b A f t \054 0 1560 0 array_in array_out array_recv array_send bittypmodin bittypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1562 ( varbit PGNSP PGUID -1 f b V t t \054 0 0 1563 varbit_in varbit_out varbit_recv varbit_send 
varbittypmodin varbittypmodout - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("variable-length bit string"); -#define VARBITOID 1562 -DATA(insert OID = 1563 ( _varbit PGNSP PGUID -1 f b A f t \054 0 1562 0 array_in array_out array_recv array_send varbittypmodin varbittypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 1600 - 1699 */ - -/* OIDS 1700 - 1799 */ -DATA(insert OID = 1700 ( numeric PGNSP PGUID -1 f b N f t \054 0 0 1231 numeric_in numeric_out numeric_recv numeric_send numerictypmodin numerictypmodout - i m f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("numeric(precision, decimal), arbitrary precision number"); -#define NUMERICOID 1700 - -DATA(insert OID = 1790 ( refcursor PGNSP PGUID -1 f b U f t \054 0 0 2201 textin textout textrecv textsend - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("reference to cursor (portal name)"); -#define REFCURSOROID 1790 - -/* OIDS 2200 - 2299 */ -DATA(insert OID = 2201 ( _refcursor PGNSP PGUID -1 f b A f t \054 0 1790 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -DATA(insert OID = 2202 ( regprocedure PGNSP PGUID 4 t b N f t \054 0 0 2207 regprocedurein regprocedureout regprocedurerecv regproceduresend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered procedure (with args)"); -#define REGPROCEDUREOID 2202 - -DATA(insert OID = 2203 ( regoper PGNSP PGUID 4 t b N f t \054 0 0 2208 regoperin regoperout regoperrecv regopersend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered operator"); -#define REGOPEROID 2203 - -DATA(insert OID = 2204 ( regoperator PGNSP PGUID 4 t b N f t \054 0 0 2209 regoperatorin regoperatorout regoperatorrecv regoperatorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered operator (with args)"); -#define REGOPERATOROID 2204 - -DATA(insert OID = 2205 ( regclass PGNSP PGUID 4 t b N f t \054 0 0 2210 regclassin regclassout regclassrecv regclasssend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered class"); -#define REGCLASSOID 2205 - -DATA(insert OID = 2206 ( regtype PGNSP PGUID 4 t b N f t \054 0 0 2211 regtypein regtypeout regtyperecv regtypesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered type"); -#define REGTYPEOID 2206 - -DATA(insert OID = 4096 ( regrole PGNSP PGUID 4 t b N f t \054 0 0 4097 regrolein regroleout regrolerecv regrolesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered role"); -#define REGROLEOID 4096 - -DATA(insert OID = 4089 ( regnamespace PGNSP PGUID 4 t b N f t \054 0 0 4090 regnamespacein regnamespaceout regnamespacerecv regnamespacesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered namespace"); -#define REGNAMESPACEOID 4089 - -DATA(insert OID = 2207 ( _regprocedure PGNSP PGUID -1 f b A f t \054 0 2202 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 2208 ( _regoper PGNSP PGUID -1 f b A f t \054 0 2203 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 2209 ( _regoperator PGNSP PGUID -1 f b A f t \054 0 2204 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 2210 ( _regclass PGNSP PGUID -1 f b A f t \054 0 2205 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 2211 ( _regtype PGNSP PGUID -1 f b A f t \054 0 2206 0 
array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define REGTYPEARRAYOID 2211 -DATA(insert OID = 4097 ( _regrole PGNSP PGUID -1 f b A f t \054 0 4096 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 4090 ( _regnamespace PGNSP PGUID -1 f b A f t \054 0 4089 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* uuid */ -DATA(insert OID = 2950 ( uuid PGNSP PGUID 16 f b U f t \054 0 0 2951 uuid_in uuid_out uuid_recv uuid_send - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("UUID datatype"); -#define UUIDOID 2950 -DATA(insert OID = 2951 ( _uuid PGNSP PGUID -1 f b A f t \054 0 2950 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* pg_lsn */ -DATA(insert OID = 3220 ( pg_lsn PGNSP PGUID 8 FLOAT8PASSBYVAL b U f t \054 0 0 3221 pg_lsn_in pg_lsn_out pg_lsn_recv pg_lsn_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("PostgreSQL LSN datatype"); -#define LSNOID 3220 -DATA(insert OID = 3221 ( _pg_lsn PGNSP PGUID -1 f b A f t \054 0 3220 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* text search */ -DATA(insert OID = 3614 ( tsvector PGNSP PGUID -1 f b U f t \054 0 0 3643 tsvectorin tsvectorout tsvectorrecv tsvectorsend - - ts_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("text representation for text search"); -#define TSVECTOROID 3614 -DATA(insert OID = 3642 ( gtsvector PGNSP PGUID -1 f b U f t \054 0 0 3644 gtsvectorin gtsvectorout - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("GiST index internal text representation for text search"); -#define GTSVECTOROID 3642 -DATA(insert OID = 3615 ( tsquery PGNSP PGUID -1 f b U f t \054 0 0 3645 tsqueryin tsqueryout tsqueryrecv tsquerysend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("query representation for text search"); -#define TSQUERYOID 3615 -DATA(insert OID = 3734 ( regconfig PGNSP PGUID 4 t b N f t \054 0 0 3735 regconfigin regconfigout regconfigrecv regconfigsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered text search configuration"); -#define REGCONFIGOID 3734 -DATA(insert OID = 3769 ( regdictionary PGNSP PGUID 4 t b N f t \054 0 0 3770 regdictionaryin regdictionaryout regdictionaryrecv regdictionarysend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered text search dictionary"); -#define REGDICTIONARYOID 3769 - -DATA(insert OID = 3643 ( _tsvector PGNSP PGUID -1 f b A f t \054 0 3614 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3644 ( _gtsvector PGNSP PGUID -1 f b A f t \054 0 3642 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3645 ( _tsquery PGNSP PGUID -1 f b A f t \054 0 3615 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3735 ( _regconfig PGNSP PGUID -1 f b A f t \054 0 3734 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3770 ( _regdictionary PGNSP PGUID -1 f b A f t \054 0 3769 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* jsonb */ -DATA(insert OID = 3802 ( jsonb PGNSP PGUID -1 f b U f t \054 0 0 3807 
jsonb_in jsonb_out jsonb_recv jsonb_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("Binary JSON"); -#define JSONBOID 3802 -DATA(insert OID = 3807 ( _jsonb PGNSP PGUID -1 f b A f t \054 0 3802 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -DATA(insert OID = 2970 ( txid_snapshot PGNSP PGUID -1 f b U f t \054 0 0 2949 txid_snapshot_in txid_snapshot_out txid_snapshot_recv txid_snapshot_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("txid snapshot"); -DATA(insert OID = 2949 ( _txid_snapshot PGNSP PGUID -1 f b A f t \054 0 2970 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* range types */ -DATA(insert OID = 3904 ( int4range PGNSP PGUID -1 f r R f t \054 0 0 3905 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of integers"); -#define INT4RANGEOID 3904 -DATA(insert OID = 3905 ( _int4range PGNSP PGUID -1 f b A f t \054 0 3904 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3906 ( numrange PGNSP PGUID -1 f r R f t \054 0 0 3907 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of numerics"); -DATA(insert OID = 3907 ( _numrange PGNSP PGUID -1 f b A f t \054 0 3906 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3908 ( tsrange PGNSP PGUID -1 f r R f t \054 0 0 3909 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of timestamps without time zone"); -DATA(insert OID = 3909 ( _tsrange PGNSP PGUID -1 f b A f t \054 0 3908 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3910 ( tstzrange PGNSP PGUID -1 f r R f t \054 0 0 3911 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of timestamps with time zone"); -DATA(insert OID = 3911 ( _tstzrange PGNSP PGUID -1 f b A f t \054 0 3910 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3912 ( daterange PGNSP PGUID -1 f r R f t \054 0 0 3913 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of dates"); -DATA(insert OID = 3913 ( _daterange PGNSP PGUID -1 f b A f t \054 0 3912 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3926 ( int8range PGNSP PGUID -1 f r R f t \054 0 0 3927 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of bigints"); -DATA(insert OID = 3927 ( _int8range PGNSP PGUID -1 f b A f t \054 0 3926 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* - * pseudo-types - * - * types with typtype='p' represent various special cases in the type system. - * - * These cannot be used to define table columns, but are valid as function - * argument and result types (if supported by the function's implementation - * language). - * - * Note: cstring is a borderline case; it is still considered a pseudo-type, - * but there is now support for it in records and arrays. 
Perhaps we should - * just treat it as a regular base type? - */ -DATA(insert OID = 2249 ( record PGNSP PGUID -1 f p P f t \054 0 0 2287 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -#define RECORDOID 2249 -DATA(insert OID = 2287 ( _record PGNSP PGUID -1 f p P f t \054 0 2249 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -#define RECORDARRAYOID 2287 -DATA(insert OID = 2275 ( cstring PGNSP PGUID -2 f p P f t \054 0 0 1263 cstring_in cstring_out cstring_recv cstring_send - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -#define CSTRINGOID 2275 -DATA(insert OID = 2276 ( any PGNSP PGUID 4 t p P f t \054 0 0 0 any_in any_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYOID 2276 -DATA(insert OID = 2277 ( anyarray PGNSP PGUID -1 f p P f t \054 0 0 0 anyarray_in anyarray_out anyarray_recv anyarray_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYARRAYOID 2277 -DATA(insert OID = 2278 ( void PGNSP PGUID 4 t p P f t \054 0 0 0 void_in void_out void_recv void_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define VOIDOID 2278 -DATA(insert OID = 2279 ( trigger PGNSP PGUID 4 t p P f t \054 0 0 0 trigger_in trigger_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define TRIGGEROID 2279 -DATA(insert OID = 3838 ( event_trigger PGNSP PGUID 4 t p P f t \054 0 0 0 event_trigger_in event_trigger_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define EVTTRIGGEROID 3838 -DATA(insert OID = 2280 ( language_handler PGNSP PGUID 4 t p P f t \054 0 0 0 language_handler_in language_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define LANGUAGE_HANDLEROID 2280 -DATA(insert OID = 2281 ( internal PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 internal_in internal_out - - - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ )); -#define INTERNALOID 2281 -DATA(insert OID = 2282 ( opaque PGNSP PGUID 4 t p P f t \054 0 0 0 opaque_in opaque_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define OPAQUEOID 2282 -DATA(insert OID = 2283 ( anyelement PGNSP PGUID 4 t p P f t \054 0 0 0 anyelement_in anyelement_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYELEMENTOID 2283 -DATA(insert OID = 2776 ( anynonarray PGNSP PGUID 4 t p P f t \054 0 0 0 anynonarray_in anynonarray_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYNONARRAYOID 2776 -DATA(insert OID = 3500 ( anyenum PGNSP PGUID 4 t p P f t \054 0 0 0 anyenum_in anyenum_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYENUMOID 3500 -DATA(insert OID = 3115 ( fdw_handler PGNSP PGUID 4 t p P f t \054 0 0 0 fdw_handler_in fdw_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define FDW_HANDLEROID 3115 -DATA(insert OID = 325 ( index_am_handler PGNSP PGUID 4 t p P f t \054 0 0 0 index_am_handler_in index_am_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define INDEX_AM_HANDLEROID 325 -DATA(insert OID = 3310 ( tsm_handler PGNSP PGUID 4 t p P f t \054 0 0 0 tsm_handler_in tsm_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define TSM_HANDLEROID 3310 -DATA(insert OID = 3831 ( anyrange PGNSP PGUID -1 f p P f t \054 0 0 0 anyrange_in anyrange_out - - - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYRANGEOID 3831 - - -/* - * macros - */ -#define TYPTYPE_BASE 'b' /* base type (ordinary scalar type) */ -#define TYPTYPE_COMPOSITE 'c' /* composite (e.g., table's rowtype) */ -#define TYPTYPE_DOMAIN 'd' /* domain 
over another type */ -#define TYPTYPE_ENUM 'e' /* enumerated type */ -#define TYPTYPE_PSEUDO 'p' /* pseudo-type */ -#define TYPTYPE_RANGE 'r' /* range type */ - -#define TYPCATEGORY_INVALID '\0' /* not an allowed category */ -#define TYPCATEGORY_ARRAY 'A' -#define TYPCATEGORY_BOOLEAN 'B' -#define TYPCATEGORY_COMPOSITE 'C' -#define TYPCATEGORY_DATETIME 'D' -#define TYPCATEGORY_ENUM 'E' -#define TYPCATEGORY_GEOMETRIC 'G' -#define TYPCATEGORY_NETWORK 'I' /* think INET */ -#define TYPCATEGORY_NUMERIC 'N' -#define TYPCATEGORY_PSEUDOTYPE 'P' -#define TYPCATEGORY_RANGE 'R' -#define TYPCATEGORY_STRING 'S' -#define TYPCATEGORY_TIMESPAN 'T' -#define TYPCATEGORY_USER 'U' -#define TYPCATEGORY_BITSTRING 'V' /* er ... "varbit"? */ -#define TYPCATEGORY_UNKNOWN 'X' - -/* Is a type OID a polymorphic pseudotype? (Beware of multiple evaluation) */ -#define IsPolymorphicType(typid) \ - ((typid) == ANYELEMENTOID || \ - (typid) == ANYARRAYOID || \ - (typid) == ANYNONARRAYOID || \ - (typid) == ANYENUMOID || \ - (typid) == ANYRANGEOID) - -#endif /* PG_TYPE_H */ diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 18be4bfc5..65c0852ab 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -1,29 +1,30 @@ use marksman_escape::Escape; use regex::Regex; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; +use std::fmt::Write as _; use std::fs::File; use std::io::{BufWriter, Write}; +use std::iter; use std::path::Path; +use std::str; use crate::snake_to_camel; -const PG_TYPE_H: &str = include_str!("pg_type.h"); -const PG_RANGE_H: &str = include_str!("pg_range.h"); +const PG_TYPE_DAT: &str = include_str!("pg_type.dat"); +const PG_RANGE_DAT: &str = include_str!("pg_range.dat"); struct Type { - name: &'static str, + name: String, variant: String, ident: String, - kind: &'static str, + kind: String, element: u32, doc: String, } pub fn build(path: &Path) { let mut file = BufWriter::new(File::create(path.join("types/type_gen.rs")).unwrap()); - - let ranges = parse_ranges(); - let types = parse_types(&ranges); + let types = parse_types(); make_header(&mut file); make_enum(&mut file, &types); @@ -31,76 +32,220 @@ pub fn build(path: &Path) { make_consts(&mut file, &types); } -fn parse_ranges() -> BTreeMap { - let mut ranges = BTreeMap::new(); +struct DatParser<'a> { + it: iter::Peekable>, + s: &'a str, +} - for line in PG_RANGE_H.lines() { - if !line.starts_with("DATA") { - continue; +impl<'a> DatParser<'a> { + fn new(s: &'a str) -> DatParser<'a> { + DatParser { + it: s.char_indices().peekable(), + s, + } + } + + fn parse_array(&mut self) -> Vec> { + self.eat('['); + let mut vec = vec![]; + while !self.try_eat(']') { + let object = self.parse_object(); + vec.push(object); + } + self.eof(); + + vec + } + + fn parse_object(&mut self) -> HashMap { + let mut object = HashMap::new(); + + self.eat('{'); + loop { + let key = self.parse_ident(); + self.eat('='); + self.eat('>'); + let value = self.parse_string(); + object.insert(key, value); + if !self.try_eat(',') { + break; + } + } + self.eat('}'); + self.eat(','); + + object + } + + fn parse_ident(&mut self) -> String { + self.skip_ws(); + + let start = match self.it.peek() { + Some((i, _)) => *i, + None => return "".to_string(), + }; + + loop { + match self.it.peek() { + Some((_, 'a'..='z')) | Some((_, '_')) => { + self.it.next(); + } + Some((i, _)) => return self.s[start..*i].to_string(), + None => return self.s[start..].to_string(), + } } + } - let split = line.split_whitespace().collect::>(); + fn parse_string(&mut self) -> 
String { + self.skip_ws(); + + let mut s = String::new(); + + self.eat('\''); + loop { + match self.it.next() { + Some((_, '\'')) => return s, + Some((_, '\\')) => { + let (_, ch) = self.it.next().expect("unexpected eof"); + s.push(ch); + } + Some((_, ch)) => s.push(ch), + None => panic!("unexpected eof"), + } + } + } - let oid = split[2].parse().unwrap(); - let element = split[3].parse().unwrap(); + fn eat(&mut self, target: char) { + self.skip_ws(); - ranges.insert(oid, element); + match self.it.next() { + Some((_, ch)) if ch == target => {} + Some((_, ch)) => panic!("expected {} but got {}", target, ch), + None => panic!("expected {} but got eof", target), + } } - ranges + fn try_eat(&mut self, target: char) -> bool { + if self.peek(target) { + self.eat(target); + true + } else { + false + } + } + + fn peek(&mut self, target: char) -> bool { + self.skip_ws(); + + match self.it.peek() { + Some((_, ch)) if *ch == target => true, + _ => false, + } + } + + fn eof(&mut self) { + self.skip_ws(); + match self.it.next() { + Some((_, ch)) => panic!("expected eof but got {}", ch), + None => {} + } + } + + fn skip_ws(&mut self) { + loop { + match self.it.peek() { + Some(&(_, '#')) => self.skip_to('\n'), + Some(&(_, '\n')) | Some(&(_, ' ')) | Some(&(_, '\t')) => { + self.it.next(); + } + _ => break, + } + } + } + + fn skip_to(&mut self, target: char) { + for (_, ch) in &mut self.it { + if ch == target { + break; + } + } + } } -fn parse_types(ranges: &BTreeMap) -> BTreeMap { - let doc_re = Regex::new(r#"DESCR\("([^"]+)"\)"#).unwrap(); +fn parse_types() -> BTreeMap { + let raw_types = DatParser::new(PG_TYPE_DAT).parse_array(); + let raw_ranges = DatParser::new(PG_RANGE_DAT).parse_array(); + + let oids_by_name = raw_types + .iter() + .map(|m| (m["typname"].clone(), m["oid"].parse::().unwrap())) + .collect::>(); + + let range_elements = raw_ranges + .iter() + .map(|m| { + ( + oids_by_name[&*m["rngtypid"]], + oids_by_name[&*m["rngsubtype"]], + ) + }) + .collect::>(); + let range_vector_re = Regex::new("(range|vector)$").unwrap(); let array_re = Regex::new("^_(.*)").unwrap(); let mut types = BTreeMap::new(); - let mut lines = PG_TYPE_H.lines().peekable(); - while let Some(line) = lines.next() { - if !line.starts_with("DATA") { - continue; - } - - let split = line.split_whitespace().collect::>(); - - let oid = split[3].parse().unwrap(); + for raw_type in raw_types { + let oid = raw_type["oid"].parse::().unwrap(); - let name = split[5]; + let name = raw_type["typname"].clone(); - let ident = range_vector_re.replace(name, "_$1"); + let ident = range_vector_re.replace(&name, "_$1"); let ident = array_re.replace(&ident, "$1_array"); let variant = snake_to_camel(&ident); let ident = ident.to_ascii_uppercase(); - let kind = split[11]; + let kind = raw_type["typcategory"].clone(); // we need to be able to pull composite fields and enum variants at runtime if kind == "C" || kind == "E" { continue; } - let element = if let Some(&element) = ranges.get(&oid) { - element - } else { - split[16].parse().unwrap() + let element = match &*kind { + "R" => range_elements[&oid], + "A" => oids_by_name[&raw_type["typelem"]], + _ => 0, }; - let doc = array_re.replace(name, "$1[]"); - let mut doc = doc.to_ascii_uppercase(); - - let descr = lines - .peek() - .and_then(|line| doc_re.captures(line)) - .and_then(|captures| captures.at(1)); - if let Some(descr) = descr { - doc.push_str(" - "); - doc.push_str(descr); + let doc_name = array_re.replace(&name, "$1[]").to_ascii_uppercase(); + let mut doc = doc_name.clone(); + if let 
Some(descr) = raw_type.get("descr") { + write!(doc, " - {}", descr).unwrap(); } let doc = Escape::new(doc.as_bytes().iter().cloned()).collect(); let doc = String::from_utf8(doc).unwrap(); + if let Some(array_type_oid) = raw_type.get("array_type_oid") { + let array_type_oid = array_type_oid.parse::().unwrap(); + + let name = format!("_{}", name); + let variant = format!("{}Array", variant); + let doc = format!("{}[]", doc_name); + let ident = format!("{}_ARRAY", ident); + + let type_ = Type { + name, + variant, + ident, + kind: "A".to_string(), + element: oid, + doc, + }; + types.insert(array_type_oid, type_); + } + let type_ = Type { name, variant, @@ -109,7 +254,6 @@ fn parse_types(ranges: &BTreeMap) -> BTreeMap { element, doc, }; - types.insert(oid, type_); } @@ -221,7 +365,7 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { .unwrap(); for type_ in types.values() { - let kind = match type_.kind { + let kind = match &*type_.kind { "P" => "Pseudo".to_owned(), "A" => format!("Array(Type(Inner::{}))", types[&type_.element].variant), "R" => format!("Range(Type(Inner::{}))", types[&type_.element].variant), diff --git a/tokio-postgres/src/types/type_gen.rs b/tokio-postgres/src/types/type_gen.rs index f5b1b19c6..626cd1a3a 100644 --- a/tokio-postgres/src/types/type_gen.rs +++ b/tokio-postgres/src/types/type_gen.rs @@ -34,7 +34,7 @@ pub enum Inner { XmlArray, PgNodeTree, JsonArray, - Smgr, + TableAmHandler, IndexAmHandler, Point, Lseg, @@ -47,9 +47,6 @@ pub enum Inner { CidrArray, Float4, Float8, - Abstime, - Reltime, - Tinterval, Unknown, Circle, CircleArray, @@ -81,9 +78,6 @@ pub enum Inner { BoxArray, Float4Array, Float8Array, - AbstimeArray, - ReltimeArray, - TintervalArray, PolygonArray, OidArray, Aclitem, @@ -172,10 +166,13 @@ pub enum Inner { DateRangeArray, Int8Range, Int8RangeArray, + Jsonpath, + JsonpathArray, Regnamespace, RegnamespaceArray, Regrole, RegroleArray, + PgMcvList, Other(Arc), } @@ -203,7 +200,7 @@ impl Inner { 143 => Some(Inner::XmlArray), 194 => Some(Inner::PgNodeTree), 199 => Some(Inner::JsonArray), - 210 => Some(Inner::Smgr), + 269 => Some(Inner::TableAmHandler), 325 => Some(Inner::IndexAmHandler), 600 => Some(Inner::Point), 601 => Some(Inner::Lseg), @@ -216,9 +213,6 @@ impl Inner { 651 => Some(Inner::CidrArray), 700 => Some(Inner::Float4), 701 => Some(Inner::Float8), - 702 => Some(Inner::Abstime), - 703 => Some(Inner::Reltime), - 704 => Some(Inner::Tinterval), 705 => Some(Inner::Unknown), 718 => Some(Inner::Circle), 719 => Some(Inner::CircleArray), @@ -250,9 +244,6 @@ impl Inner { 1020 => Some(Inner::BoxArray), 1021 => Some(Inner::Float4Array), 1022 => Some(Inner::Float8Array), - 1023 => Some(Inner::AbstimeArray), - 1024 => Some(Inner::ReltimeArray), - 1025 => Some(Inner::TintervalArray), 1027 => Some(Inner::PolygonArray), 1028 => Some(Inner::OidArray), 1033 => Some(Inner::Aclitem), @@ -341,10 +332,13 @@ impl Inner { 3913 => Some(Inner::DateRangeArray), 3926 => Some(Inner::Int8Range), 3927 => Some(Inner::Int8RangeArray), + 4072 => Some(Inner::Jsonpath), + 4073 => Some(Inner::JsonpathArray), 4089 => Some(Inner::Regnamespace), 4090 => Some(Inner::RegnamespaceArray), 4096 => Some(Inner::Regrole), 4097 => Some(Inner::RegroleArray), + 5017 => Some(Inner::PgMcvList), _ => None, } } @@ -372,7 +366,7 @@ impl Inner { Inner::XmlArray => 143, Inner::PgNodeTree => 194, Inner::JsonArray => 199, - Inner::Smgr => 210, + Inner::TableAmHandler => 269, Inner::IndexAmHandler => 325, Inner::Point => 600, Inner::Lseg => 601, @@ -385,9 +379,6 @@ impl Inner { Inner::CidrArray => 651, 
Inner::Float4 => 700, Inner::Float8 => 701, - Inner::Abstime => 702, - Inner::Reltime => 703, - Inner::Tinterval => 704, Inner::Unknown => 705, Inner::Circle => 718, Inner::CircleArray => 719, @@ -419,9 +410,6 @@ impl Inner { Inner::BoxArray => 1020, Inner::Float4Array => 1021, Inner::Float8Array => 1022, - Inner::AbstimeArray => 1023, - Inner::ReltimeArray => 1024, - Inner::TintervalArray => 1025, Inner::PolygonArray => 1027, Inner::OidArray => 1028, Inner::Aclitem => 1033, @@ -510,10 +498,13 @@ impl Inner { Inner::DateRangeArray => 3913, Inner::Int8Range => 3926, Inner::Int8RangeArray => 3927, + Inner::Jsonpath => 4072, + Inner::JsonpathArray => 4073, Inner::Regnamespace => 4089, Inner::RegnamespaceArray => 4090, Inner::Regrole => 4096, Inner::RegroleArray => 4097, + Inner::PgMcvList => 5017, Inner::Other(ref u) => u.oid, } } @@ -541,7 +532,7 @@ impl Inner { Inner::XmlArray => &Kind::Array(Type(Inner::Xml)), Inner::PgNodeTree => &Kind::Simple, Inner::JsonArray => &Kind::Array(Type(Inner::Json)), - Inner::Smgr => &Kind::Simple, + Inner::TableAmHandler => &Kind::Pseudo, Inner::IndexAmHandler => &Kind::Pseudo, Inner::Point => &Kind::Simple, Inner::Lseg => &Kind::Simple, @@ -554,9 +545,6 @@ impl Inner { Inner::CidrArray => &Kind::Array(Type(Inner::Cidr)), Inner::Float4 => &Kind::Simple, Inner::Float8 => &Kind::Simple, - Inner::Abstime => &Kind::Simple, - Inner::Reltime => &Kind::Simple, - Inner::Tinterval => &Kind::Simple, Inner::Unknown => &Kind::Simple, Inner::Circle => &Kind::Simple, Inner::CircleArray => &Kind::Array(Type(Inner::Circle)), @@ -588,9 +576,6 @@ impl Inner { Inner::BoxArray => &Kind::Array(Type(Inner::Box)), Inner::Float4Array => &Kind::Array(Type(Inner::Float4)), Inner::Float8Array => &Kind::Array(Type(Inner::Float8)), - Inner::AbstimeArray => &Kind::Array(Type(Inner::Abstime)), - Inner::ReltimeArray => &Kind::Array(Type(Inner::Reltime)), - Inner::TintervalArray => &Kind::Array(Type(Inner::Tinterval)), Inner::PolygonArray => &Kind::Array(Type(Inner::Polygon)), Inner::OidArray => &Kind::Array(Type(Inner::Oid)), Inner::Aclitem => &Kind::Simple, @@ -679,10 +664,13 @@ impl Inner { Inner::DateRangeArray => &Kind::Array(Type(Inner::DateRange)), Inner::Int8Range => &Kind::Range(Type(Inner::Int8)), Inner::Int8RangeArray => &Kind::Array(Type(Inner::Int8Range)), + Inner::Jsonpath => &Kind::Simple, + Inner::JsonpathArray => &Kind::Array(Type(Inner::Jsonpath)), Inner::Regnamespace => &Kind::Simple, Inner::RegnamespaceArray => &Kind::Array(Type(Inner::Regnamespace)), Inner::Regrole => &Kind::Simple, Inner::RegroleArray => &Kind::Array(Type(Inner::Regrole)), + Inner::PgMcvList => &Kind::Simple, Inner::Other(ref u) => &u.kind, } } @@ -710,7 +698,7 @@ impl Inner { Inner::XmlArray => "_xml", Inner::PgNodeTree => "pg_node_tree", Inner::JsonArray => "_json", - Inner::Smgr => "smgr", + Inner::TableAmHandler => "table_am_handler", Inner::IndexAmHandler => "index_am_handler", Inner::Point => "point", Inner::Lseg => "lseg", @@ -723,9 +711,6 @@ impl Inner { Inner::CidrArray => "_cidr", Inner::Float4 => "float4", Inner::Float8 => "float8", - Inner::Abstime => "abstime", - Inner::Reltime => "reltime", - Inner::Tinterval => "tinterval", Inner::Unknown => "unknown", Inner::Circle => "circle", Inner::CircleArray => "_circle", @@ -757,9 +742,6 @@ impl Inner { Inner::BoxArray => "_box", Inner::Float4Array => "_float4", Inner::Float8Array => "_float8", - Inner::AbstimeArray => "_abstime", - Inner::ReltimeArray => "_reltime", - Inner::TintervalArray => "_tinterval", Inner::PolygonArray => "_polygon", 
Inner::OidArray => "_oid", Inner::Aclitem => "aclitem", @@ -848,10 +830,13 @@ impl Inner { Inner::DateRangeArray => "_daterange", Inner::Int8Range => "int8range", Inner::Int8RangeArray => "_int8range", + Inner::Jsonpath => "jsonpath", + Inner::JsonpathArray => "_jsonpath", Inner::Regnamespace => "regnamespace", Inner::RegnamespaceArray => "_regnamespace", Inner::Regrole => "regrole", Inner::RegroleArray => "_regrole", + Inner::PgMcvList => "pg_mcv_list", Inner::Other(ref u) => &u.name, } } @@ -905,7 +890,7 @@ impl Type { /// PG_DDL_COMMAND - internal type for passing CollectedCommand pub const PG_DDL_COMMAND: Type = Type(Inner::PgDdlCommand); - /// JSON + /// JSON - JSON stored as text pub const JSON: Type = Type(Inner::Json); /// XML - XML content @@ -920,10 +905,10 @@ impl Type { /// JSON[] pub const JSON_ARRAY: Type = Type(Inner::JsonArray); - /// SMGR - storage manager - pub const SMGR: Type = Type(Inner::Smgr); + /// TABLE_AM_HANDLER + pub const TABLE_AM_HANDLER: Type = Type(Inner::TableAmHandler); - /// INDEX_AM_HANDLER + /// INDEX_AM_HANDLER - pseudo-type for the result of an index AM handler function pub const INDEX_AM_HANDLER: Type = Type(Inner::IndexAmHandler); /// POINT - geometric point '(x, y)' @@ -959,16 +944,7 @@ impl Type { /// FLOAT8 - double-precision floating point number, 8-byte storage pub const FLOAT8: Type = Type(Inner::Float8); - /// ABSTIME - absolute, limited-range date and time (Unix system time) - pub const ABSTIME: Type = Type(Inner::Abstime); - - /// RELTIME - relative, limited-range time interval (Unix delta time) - pub const RELTIME: Type = Type(Inner::Reltime); - - /// TINTERVAL - (abstime,abstime), time interval - pub const TINTERVAL: Type = Type(Inner::Tinterval); - - /// UNKNOWN + /// UNKNOWN - pseudo-type representing an undetermined type pub const UNKNOWN: Type = Type(Inner::Unknown); /// CIRCLE - geometric circle '(center,radius)' @@ -1061,15 +1037,6 @@ impl Type { /// FLOAT8[] pub const FLOAT8_ARRAY: Type = Type(Inner::Float8Array); - /// ABSTIME[] - pub const ABSTIME_ARRAY: Type = Type(Inner::AbstimeArray); - - /// RELTIME[] - pub const RELTIME_ARRAY: Type = Type(Inner::ReltimeArray); - - /// TINTERVAL[] - pub const TINTERVAL_ARRAY: Type = Type(Inner::TintervalArray); - /// POLYGON[] pub const POLYGON_ARRAY: Type = Type(Inner::PolygonArray); @@ -1187,40 +1154,40 @@ impl Type { /// REGTYPE[] pub const REGTYPE_ARRAY: Type = Type(Inner::RegtypeArray); - /// RECORD + /// RECORD - pseudo-type representing any composite type pub const RECORD: Type = Type(Inner::Record); - /// CSTRING + /// CSTRING - C-style string pub const CSTRING: Type = Type(Inner::Cstring); - /// ANY + /// ANY - pseudo-type representing any type pub const ANY: Type = Type(Inner::Any); - /// ANYARRAY + /// ANYARRAY - pseudo-type representing a polymorphic array type pub const ANYARRAY: Type = Type(Inner::Anyarray); - /// VOID + /// VOID - pseudo-type for the result of a function with no real result pub const VOID: Type = Type(Inner::Void); - /// TRIGGER + /// TRIGGER - pseudo-type for the result of a trigger function pub const TRIGGER: Type = Type(Inner::Trigger); - /// LANGUAGE_HANDLER + /// LANGUAGE_HANDLER - pseudo-type for the result of a language handler function pub const LANGUAGE_HANDLER: Type = Type(Inner::LanguageHandler); - /// INTERNAL + /// INTERNAL - pseudo-type representing an internal data structure pub const INTERNAL: Type = Type(Inner::Internal); - /// OPAQUE + /// OPAQUE - obsolete, deprecated pseudo-type pub const OPAQUE: Type = Type(Inner::Opaque); - /// ANYELEMENT 
+ /// ANYELEMENT - pseudo-type representing a polymorphic base type pub const ANYELEMENT: Type = Type(Inner::Anyelement); /// RECORD[] pub const RECORD_ARRAY: Type = Type(Inner::RecordArray); - /// ANYNONARRAY + /// ANYNONARRAY - pseudo-type representing a polymorphic base type that is not an array pub const ANYNONARRAY: Type = Type(Inner::Anynonarray); /// TXID_SNAPSHOT[] @@ -1235,7 +1202,7 @@ impl Type { /// TXID_SNAPSHOT - txid snapshot pub const TXID_SNAPSHOT: Type = Type(Inner::TxidSnapshot); - /// FDW_HANDLER + /// FDW_HANDLER - pseudo-type for the result of an FDW handler function pub const FDW_HANDLER: Type = Type(Inner::FdwHandler); /// PG_LSN - PostgreSQL LSN datatype @@ -1244,7 +1211,7 @@ impl Type { /// PG_LSN[] pub const PG_LSN_ARRAY: Type = Type(Inner::PgLsnArray); - /// TSM_HANDLER + /// TSM_HANDLER - pseudo-type for the result of a tablesample method function pub const TSM_HANDLER: Type = Type(Inner::TsmHandler); /// PG_NDISTINCT - multivariate ndistinct coefficients @@ -1253,7 +1220,7 @@ impl Type { /// PG_DEPENDENCIES - multivariate dependencies pub const PG_DEPENDENCIES: Type = Type(Inner::PgDependencies); - /// ANYENUM + /// ANYENUM - pseudo-type representing a polymorphic base type that is an enum pub const ANYENUM: Type = Type(Inner::Anyenum); /// TSVECTOR - text representation for text search @@ -1292,10 +1259,10 @@ impl Type { /// JSONB[] pub const JSONB_ARRAY: Type = Type(Inner::JsonbArray); - /// ANYRANGE + /// ANYRANGE - pseudo-type representing a polymorphic base type that is a range pub const ANY_RANGE: Type = Type(Inner::AnyRange); - /// EVENT_TRIGGER + /// EVENT_TRIGGER - pseudo-type for the result of an event trigger function pub const EVENT_TRIGGER: Type = Type(Inner::EventTrigger); /// INT4RANGE - range of integers @@ -1334,6 +1301,12 @@ impl Type { /// INT8RANGE[] pub const INT8_RANGE_ARRAY: Type = Type(Inner::Int8RangeArray); + /// JSONPATH - JSON path + pub const JSONPATH: Type = Type(Inner::Jsonpath); + + /// JSONPATH[] + pub const JSONPATH_ARRAY: Type = Type(Inner::JsonpathArray); + /// REGNAMESPACE - registered namespace pub const REGNAMESPACE: Type = Type(Inner::Regnamespace); @@ -1345,4 +1318,7 @@ impl Type { /// REGROLE[] pub const REGROLE_ARRAY: Type = Type(Inner::RegroleArray); + + /// PG_MCV_LIST - multivariate MCV list + pub const PG_MCV_LIST: Type = Type(Inner::PgMcvList); } From 4b5bcbb6020d70f3fcd32de4a7a5c5d075e54426 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 7 Oct 2019 17:10:34 -0700 Subject: [PATCH 254/819] Split ToSql/FromSql out to a separate crate Prep for a `derive` feature. 
--- Cargo.toml | 1 + codegen/src/main.rs | 7 ++--- codegen/src/sqlstate.rs | 5 ++- codegen/src/type_gen.rs | 7 ++--- postgres-types/Cargo.toml | 25 +++++++++++++++ .../src}/bit_vec_06.rs | 2 +- .../types => postgres-types/src}/chrono_04.rs | 2 +- .../types => postgres-types/src}/eui48_04.rs | 2 +- .../src}/geo_types_04.rs | 2 +- .../types/mod.rs => postgres-types/src/lib.rs | 31 ++++++++++++------- .../src}/serde_json_1.rs | 2 +- .../types => postgres-types/src}/special.rs | 2 +- .../types => postgres-types/src}/type_gen.rs | 2 +- .../types => postgres-types/src}/uuid_07.rs | 0 postgres/src/lib.rs | 4 +-- tokio-postgres/Cargo.toml | 21 +++++-------- tokio-postgres/src/types.rs | 6 ++++ tokio-postgres/tests/test/types/mod.rs | 2 +- 18 files changed, 74 insertions(+), 49 deletions(-) create mode 100644 postgres-types/Cargo.toml rename {tokio-postgres/src/types => postgres-types/src}/bit_vec_06.rs (93%) rename {tokio-postgres/src/types => postgres-types/src}/chrono_04.rs (98%) rename {tokio-postgres/src/types => postgres-types/src}/eui48_04.rs (92%) rename {tokio-postgres/src/types => postgres-types/src}/geo_types_04.rs (97%) rename tokio-postgres/src/types/mod.rs => postgres-types/src/lib.rs (96%) rename {tokio-postgres/src/types => postgres-types/src}/serde_json_1.rs (97%) rename {tokio-postgres/src/types => postgres-types/src}/special.rs (98%) rename {tokio-postgres/src/types => postgres-types/src}/type_gen.rs (99%) rename {tokio-postgres/src/types => postgres-types/src}/uuid_07.rs (100%) create mode 100644 tokio-postgres/src/types.rs diff --git a/Cargo.toml b/Cargo.toml index cf7f99ca2..fe3568fa9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "postgres-native-tls", "postgres-openssl", "postgres-protocol", + "postgres-types", "tokio-postgres", ] diff --git a/codegen/src/main.rs b/codegen/src/main.rs index 6d99ded60..d0a74f2dd 100644 --- a/codegen/src/main.rs +++ b/codegen/src/main.rs @@ -6,15 +6,12 @@ extern crate marksman_escape; extern crate phf_codegen; extern crate regex; -use std::path::Path; - mod sqlstate; mod type_gen; fn main() { - let path = Path::new("../tokio-postgres/src"); - sqlstate::build(path); - type_gen::build(path); + sqlstate::build(); + type_gen::build(); } fn snake_to_camel(s: &str) -> String { diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index 2da7e8dce..3d7a61430 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -2,12 +2,11 @@ use linked_hash_map::LinkedHashMap; use phf_codegen; use std::fs::File; use std::io::{BufWriter, Write}; -use std::path::Path; const ERRCODES_TXT: &str = include_str!("errcodes.txt"); -pub fn build(path: &Path) { - let mut file = BufWriter::new(File::create(path.join("error/sqlstate.rs")).unwrap()); +pub fn build() { + let mut file = BufWriter::new(File::create("../tokio-postgres/src/error/sqlstate.rs").unwrap()); let codes = parse_codes(); diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 65c0852ab..1e882ab18 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -5,7 +5,6 @@ use std::fmt::Write as _; use std::fs::File; use std::io::{BufWriter, Write}; use std::iter; -use std::path::Path; use std::str; use crate::snake_to_camel; @@ -22,8 +21,8 @@ struct Type { doc: String, } -pub fn build(path: &Path) { - let mut file = BufWriter::new(File::create(path.join("types/type_gen.rs")).unwrap()); +pub fn build() { + let mut file = BufWriter::new(File::create("../postgres-types/src/type_gen.rs").unwrap()); let types = parse_types(); make_header(&mut file); 
@@ -266,7 +265,7 @@ fn make_header(w: &mut BufWriter) { "// Autogenerated file - DO NOT EDIT use std::sync::Arc; -use crate::types::{{Type, Oid, Kind}}; +use crate::{{Type, Oid, Kind}}; #[derive(PartialEq, Eq, Debug)] pub struct Other {{ diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml new file mode 100644 index 000000000..a3c762a5c --- /dev/null +++ b/postgres-types/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "postgres-types" +version = "0.1.0" +authors = ["Steven Fackler "] +edition = "2018" + +[features] +"with-bit-vec-0_6" = ["bit-vec-06"] +"with-chrono-0_4" = ["chrono-04"] +"with-eui48-0_4" = ["eui48-04"] +"with-geo-types-0_4" = ["geo-types-04"] +with-serde_json-1 = ["serde-1", "serde_json-1"] +"with-uuid-0_7" = ["uuid-07"] + +[dependencies] +fallible-iterator = "0.2" +postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } + +bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } +chrono-04 = { version = "0.4", package = "chrono", optional = true } +eui48-04 = { version = "0.4", package = "eui48", optional = true } +geo-types-04 = { version = "0.4", package = "geo-types", optional = true } +serde-1 = { version = "1.0", package = "serde", optional = true } +serde_json-1 = { version = "1.0", package = "serde_json", optional = true } +uuid-07 = { version = "0.7", package = "uuid", optional = true } diff --git a/tokio-postgres/src/types/bit_vec_06.rs b/postgres-types/src/bit_vec_06.rs similarity index 93% rename from tokio-postgres/src/types/bit_vec_06.rs rename to postgres-types/src/bit_vec_06.rs index a68ed35a8..52fb6d1c1 100644 --- a/tokio-postgres/src/types/bit_vec_06.rs +++ b/postgres-types/src/bit_vec_06.rs @@ -2,7 +2,7 @@ use bit_vec_06::BitVec; use postgres_protocol::types; use std::error::Error; -use crate::types::{FromSql, IsNull, ToSql, Type}; +use crate::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for BitVec { fn from_sql(_: &Type, raw: &[u8]) -> Result> { diff --git a/tokio-postgres/src/types/chrono_04.rs b/postgres-types/src/chrono_04.rs similarity index 98% rename from tokio-postgres/src/types/chrono_04.rs rename to postgres-types/src/chrono_04.rs index e414a93fe..8a3c8a222 100644 --- a/tokio-postgres/src/types/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -2,7 +2,7 @@ use chrono_04::{DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime use postgres_protocol::types; use std::error::Error; -use crate::types::{FromSql, IsNull, ToSql, Type}; +use crate::{FromSql, IsNull, ToSql, Type}; fn base() -> NaiveDateTime { NaiveDate::from_ymd(2000, 1, 1).and_hms(0, 0, 0) diff --git a/tokio-postgres/src/types/eui48_04.rs b/postgres-types/src/eui48_04.rs similarity index 92% rename from tokio-postgres/src/types/eui48_04.rs rename to postgres-types/src/eui48_04.rs index d0a67f8a4..3e7422ee0 100644 --- a/tokio-postgres/src/types/eui48_04.rs +++ b/postgres-types/src/eui48_04.rs @@ -2,7 +2,7 @@ use eui48_04::MacAddress; use postgres_protocol::types; use std::error::Error; -use crate::types::{FromSql, IsNull, ToSql, Type}; +use crate::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for MacAddress { fn from_sql(_: &Type, raw: &[u8]) -> Result> { diff --git a/tokio-postgres/src/types/geo_types_04.rs b/postgres-types/src/geo_types_04.rs similarity index 97% rename from tokio-postgres/src/types/geo_types_04.rs rename to postgres-types/src/geo_types_04.rs index b3e7245c8..f51273f21 100644 --- a/tokio-postgres/src/types/geo_types_04.rs +++ b/postgres-types/src/geo_types_04.rs @@ -3,7 +3,7 @@ use 
geo_types_04::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; -use crate::types::{FromSql, IsNull, ToSql, Type}; +use crate::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for Point { fn from_sql(_: &Type, raw: &[u8]) -> Result> { diff --git a/tokio-postgres/src/types/mod.rs b/postgres-types/src/lib.rs similarity index 96% rename from tokio-postgres/src/types/mod.rs rename to postgres-types/src/lib.rs index f938bd7cc..0224abfad 100644 --- a/tokio-postgres/src/types/mod.rs +++ b/postgres-types/src/lib.rs @@ -1,8 +1,13 @@ -//! Types. +//! Conversions to and from Postgres types. +//! +//! This crate is used by the `tokio-postgres` and `postgres` crates. You normally don't need to depend directly on it +//! unless you want to define your own `ToSql` or `FromSql` definitions. +#![warn(missing_docs)] use fallible_iterator::FallibleIterator; use postgres_protocol; use postgres_protocol::types::{self, ArrayDimension}; +use std::any::type_name; use std::borrow::Cow; use std::collections::HashMap; use std::error::Error; @@ -12,12 +17,12 @@ use std::net::IpAddr; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use crate::types::type_gen::{Inner, Other}; +use crate::type_gen::{Inner, Other}; #[doc(inline)] pub use postgres_protocol::Oid; -pub use crate::types::special::{Date, Timestamp}; +pub use crate::special::{Date, Timestamp}; // Number of seconds from 1970-01-01 to 2000-01-01 const TIME_SEC_CONVERSION: u64 = 946_684_800; @@ -29,9 +34,9 @@ const NSEC_PER_USEC: u64 = 1_000; #[macro_export] macro_rules! accepts { ($($expected:ident),+) => ( - fn accepts(ty: &$crate::types::Type) -> bool { + fn accepts(ty: &$crate::Type) -> bool { match *ty { - $($crate::types::Type::$expected)|+ => true, + $($crate::Type::$expected)|+ => true, _ => false } } @@ -45,13 +50,13 @@ macro_rules! accepts { macro_rules! to_sql_checked { () => { fn to_sql_checked(&self, - ty: &$crate::types::Type, + ty: &$crate::Type, out: &mut ::std::vec::Vec) - -> ::std::result::Result<$crate::types::IsNull, + -> ::std::result::Result<$crate::IsNull, Box> { - $crate::types::__to_sql_checked(self, ty, out) + $crate::__to_sql_checked(self, ty, out) } } } @@ -91,7 +96,6 @@ mod type_gen; #[cfg(feature = "with-serde_json-1")] pub use crate::types::serde_json_1::Json; -use std::any::type_name; /// A Postgres type. #[derive(PartialEq, Eq, Clone, Debug)] @@ -108,7 +112,8 @@ impl fmt::Display for Type { } impl Type { - pub(crate) fn new(name: String, oid: Oid, kind: Kind, schema: String) -> Type { + /// Creates a new `Type`. + pub fn new(name: String, oid: Oid, kind: Kind, schema: String) -> Type { Type(Inner::Other(Arc::new(Other { name, oid, @@ -176,7 +181,8 @@ pub struct Field { } impl Field { - pub(crate) fn new(name: String, type_: Type) -> Field { + /// Creates a new `Field`. + pub fn new(name: String, type_: Type) -> Field { Field { name, type_ } } @@ -225,7 +231,8 @@ impl fmt::Display for WrongType { impl Error for WrongType {} impl WrongType { - pub(crate) fn new(ty: Type) -> WrongType { + /// Creates a new `WrongType` error. 
+ pub fn new(ty: Type) -> WrongType { WrongType { postgres: ty, rust: type_name::(), diff --git a/tokio-postgres/src/types/serde_json_1.rs b/postgres-types/src/serde_json_1.rs similarity index 97% rename from tokio-postgres/src/types/serde_json_1.rs rename to postgres-types/src/serde_json_1.rs index b3b2e3c90..01616d32f 100644 --- a/tokio-postgres/src/types/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -4,7 +4,7 @@ use std::error::Error; use std::fmt::Debug; use std::io::Read; -use crate::types::{FromSql, IsNull, ToSql, Type}; +use crate::{FromSql, IsNull, ToSql, Type}; /// A wrapper type to allow arbitrary `Serialize`/`Deserialize` types to convert to Postgres JSON values. #[derive(Debug)] diff --git a/tokio-postgres/src/types/special.rs b/postgres-types/src/special.rs similarity index 98% rename from tokio-postgres/src/types/special.rs rename to postgres-types/src/special.rs index 15538f7a3..b1da02a74 100644 --- a/tokio-postgres/src/types/special.rs +++ b/postgres-types/src/special.rs @@ -2,7 +2,7 @@ use postgres_protocol::types; use std::error::Error; use std::{i32, i64}; -use crate::types::{FromSql, IsNull, ToSql, Type}; +use crate::{FromSql, IsNull, ToSql, Type}; /// A wrapper that can be used to represent infinity with `Type::Date` types. #[derive(Debug, Clone, Copy, PartialEq)] diff --git a/tokio-postgres/src/types/type_gen.rs b/postgres-types/src/type_gen.rs similarity index 99% rename from tokio-postgres/src/types/type_gen.rs rename to postgres-types/src/type_gen.rs index 626cd1a3a..0ff5a2192 100644 --- a/tokio-postgres/src/types/type_gen.rs +++ b/postgres-types/src/type_gen.rs @@ -1,7 +1,7 @@ // Autogenerated file - DO NOT EDIT use std::sync::Arc; -use crate::types::{Kind, Oid, Type}; +use crate::{Kind, Oid, Type}; #[derive(PartialEq, Eq, Debug)] pub struct Other { diff --git a/tokio-postgres/src/types/uuid_07.rs b/postgres-types/src/uuid_07.rs similarity index 100% rename from tokio-postgres/src/types/uuid_07.rs rename to postgres-types/src/uuid_07.rs diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 901d3c4eb..cf30fd26a 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -62,9 +62,7 @@ use tokio::runtime::{self, Runtime}; #[cfg(feature = "runtime")] pub use tokio_postgres::Socket; -pub use tokio_postgres::{ - accepts, error, row, tls, to_sql_checked, types, Column, Portal, SimpleQueryMessage, Statement, -}; +pub use tokio_postgres::{error, row, tls, types, Column, Portal, SimpleQueryMessage, Statement}; pub use crate::client::*; #[cfg(feature = "runtime")] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 63e174d58..ff83c50ee 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -23,12 +23,12 @@ circle-ci = { repository = "sfackler/rust-postgres" } default = ["runtime"] runtime = ["tokio/rt-full", "tokio/tcp", "tokio/uds", "tokio-executor", "lazy_static"] -"with-bit-vec-0_6" = ["bit-vec-06"] -"with-chrono-0_4" = ["chrono-04"] -"with-eui48-0_4" = ["eui48-04"] -"with-geo-types-0_4" = ["geo-types-04"] -with-serde_json-1 = ["serde-1", "serde_json-1"] -"with-uuid-0_7" = ["uuid-07"] +"with-bit-vec-0_6" = ["postgres-types/with-bit-vec-0_6"] +"with-chrono-0_4" = ["postgres-types/with-chrono-0_4"] +"with-eui48-0_4" = ["postgres-types/with-eui48-0_4"] +"with-geo-types-0_4" = ["postgres-types/with-geo-types-0_4"] +with-serde_json-1 = ["postgres-types/with-serde_json-1"] +"with-uuid-0_7" = ["postgres-types/with-uuid-0_7"] [dependencies] bytes = "0.4" @@ -40,19 +40,12 @@ percent-encoding = "1.0" pin-utils = 
"=0.1.0-alpha.4" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } +postgres-types = { version = "0.1.0", path = "../postgres-types" } tokio = { version = "=0.2.0-alpha.6", default-features = false, features = ["io", "codec"] } tokio-executor = { version = "=0.2.0-alpha.6", optional = true } lazy_static = { version = "1.0", optional = true } -bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } -chrono-04 = { version = "0.4", package = "chrono", optional = true } -eui48-04 = { version = "0.4", package = "eui48", optional = true } -geo-types-04 = { version = "0.4", package = "geo-types", optional = true } -serde-1 = { version = "1.0", package = "serde", optional = true } -serde_json-1 = { version = "1.0", package = "serde_json", optional = true } -uuid-07 = { version = "0.7", package = "uuid", optional = true } - [dev-dependencies] tokio = "=0.2.0-alpha.6" env_logger = "0.5" diff --git a/tokio-postgres/src/types.rs b/tokio-postgres/src/types.rs new file mode 100644 index 000000000..b2e15d059 --- /dev/null +++ b/tokio-postgres/src/types.rs @@ -0,0 +1,6 @@ +//! Types. +//! +//! This module is a reexport of the `postgres_types` crate. + +#[doc(inline)] +pub use postgres_types::*; diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index b89434fa4..7311ea28b 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -7,7 +7,7 @@ use std::fmt; use std::net::IpAddr; use std::result; use std::time::{Duration, UNIX_EPOCH}; -use tokio_postgres::to_sql_checked; +use postgres_types::to_sql_checked; use tokio_postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, ToSql, Type, WrongType}; use crate::connect; From c92e95dac2403a58dc105e56b05022d35cb5e122 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 7 Oct 2019 17:18:00 -0700 Subject: [PATCH 255/819] Fix tests --- postgres-types/src/lib.rs | 5 ++--- postgres-types/src/uuid_07.rs | 2 +- tokio-postgres/Cargo.toml | 8 ++++++++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 0224abfad..745b4a85e 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -17,6 +17,8 @@ use std::net::IpAddr; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +#[cfg(feature = "with-serde_json-1")] +pub use crate::serde_json_1::Json; use crate::type_gen::{Inner, Other}; #[doc(inline)] @@ -94,9 +96,6 @@ mod uuid_07; mod special; mod type_gen; -#[cfg(feature = "with-serde_json-1")] -pub use crate::types::serde_json_1::Json; - /// A Postgres type. 
#[derive(PartialEq, Eq, Clone, Debug)] pub struct Type(Inner); diff --git a/postgres-types/src/uuid_07.rs b/postgres-types/src/uuid_07.rs index c78d8a726..951992312 100644 --- a/postgres-types/src/uuid_07.rs +++ b/postgres-types/src/uuid_07.rs @@ -2,7 +2,7 @@ use postgres_protocol::types; use std::error::Error; use uuid_07::Uuid; -use crate::types::{FromSql, IsNull, ToSql, Type}; +use crate::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for Uuid { fn from_sql(_: &Type, raw: &[u8]) -> Result> { diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index ff83c50ee..8b88aab8c 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -49,3 +49,11 @@ lazy_static = { version = "1.0", optional = true } [dev-dependencies] tokio = "=0.2.0-alpha.6" env_logger = "0.5" + +bit-vec-06 = { version = "0.6", package = "bit-vec" } +chrono-04 = { version = "0.4", package = "chrono" } +eui48-04 = { version = "0.4", package = "eui48" } +geo-types-04 = { version = "0.4", package = "geo-types" } +serde-1 = { version = "1.0", package = "serde" } +serde_json-1 = { version = "1.0", package = "serde_json" } +uuid-07 = { version = "0.7", package = "uuid" } From 3650d4a6de22356686bb8c881f3b1e6cde65431b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 8 Oct 2019 04:25:06 -0700 Subject: [PATCH 256/819] Implement traits for NoTls Closes #495 --- tokio-postgres/src/tls.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 109caac18..78940f337 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -71,6 +71,7 @@ pub trait TlsConnect { /// A `MakeTlsConnect` and `TlsConnect` implementation which simply returns an error. /// /// This can be used when `sslmode` is `none` or `prefer`. +#[derive(Debug, Copy, Clone)] pub struct NoTls; #[cfg(feature = "runtime")] From e0e8c45159f6c255893e9cb4e1e7b7a20668b182 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 8 Oct 2019 16:20:30 -0700 Subject: [PATCH 257/819] Remove unneeded feature gate --- tokio-postgres/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 98c245fd3..023c4ad35 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -3,8 +3,6 @@ //! # Example //! //! ```no_run -//! #![feature(async_await)] -//! //! use futures::{FutureExt, TryStreamExt}; //! use tokio_postgres::{NoTls, Error, Row}; //! From 286ecdb5b9ce3e70b08cb4095ec286b56c1cd70f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 28 Sep 2019 10:48:15 -0400 Subject: [PATCH 258/819] Start on borrow overhaul --- tokio-postgres/src/client.rs | 62 +++++++++++------------ tokio-postgres/src/lib.rs | 8 +-- tokio-postgres/src/prepare.rs | 34 ++++++------- tokio-postgres/src/query.rs | 23 ++++----- tokio-postgres/src/transaction.rs | 70 ++++++++++++-------------- tokio-postgres/tests/test/types/mod.rs | 4 ++ 6 files changed, 99 insertions(+), 102 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 56e131437..356da01a9 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -173,20 +173,20 @@ impl Client { /// /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), /// which are set when executed. Prepared statements can only be used with the connection that created them. 
- pub fn prepare(&mut self, query: &str) -> impl Future> { - self.prepare_typed(query, &[]) + pub async fn prepare(&self, query: &str) -> Result { + self.prepare_typed(query, &[]).await } /// Like `prepare`, but allows the types of query parameters to be explicitly specified. /// /// The list of types may be smaller than the number of parameters - the types of the remaining parameters will be /// inferred. For example, `client.prepare_typed(query, &[])` is equivalent to `client.prepare(query)`. - pub fn prepare_typed( - &mut self, + pub async fn prepare_typed( + &self, query: &str, parameter_types: &[Type], - ) -> impl Future> { - prepare::prepare(self.inner(), query, parameter_types) + ) -> Result { + prepare::prepare(&self.inner, query, parameter_types).await } /// Executes a statement, returning a stream of the resulting rows. @@ -194,29 +194,29 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn query( - &mut self, - statement: &Statement, - params: &[&(dyn ToSql + Sync)], - ) -> impl Stream> { + pub fn query<'a>( + &'a self, + statement: &'a Statement, + params: &'a [&'a (dyn ToSql + Sync)], + ) -> impl Stream> + 'a { let buf = query::encode(statement, params.iter().map(|s| *s as _)); - query::query(self.inner(), statement.clone(), buf) + query::query(&self.inner, statement, buf) } /// Like [`query`], but takes an iterator of parameters rather than a slice. /// /// [`query`]: #method.query pub fn query_iter<'a, I>( - &mut self, - statement: &Statement, + &'a self, + statement: &'a Statement, params: I, - ) -> impl Stream> + ) -> impl Stream> + 'a where - I: IntoIterator, + I: IntoIterator + 'a, I::IntoIter: ExactSizeIterator, { let buf = query::encode(statement, params); - query::query(self.inner(), statement.clone(), buf) + query::query(&self.inner, statement, buf) } /// Executes a statement, returning the number of rows modified. @@ -226,29 +226,29 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn execute( - &mut self, + pub async fn execute( + &self, statement: &Statement, params: &[&(dyn ToSql + Sync)], - ) -> impl Future> { + ) -> Result { let buf = query::encode(statement, params.iter().map(|s| *s as _)); - query::execute(self.inner(), buf) + query::execute(&self.inner, buf).await } /// Like [`execute`], but takes an iterator of parameters rather than a slice. /// /// [`execute`]: #method.execute - pub fn execute_iter<'a, I>( - &mut self, + pub async fn execute_iter<'a, I>( + &self, statement: &Statement, params: I, - ) -> impl Future> + ) -> Result where I: IntoIterator, I::IntoIter: ExactSizeIterator, { let buf = query::encode(statement, params); - query::execute(self.inner(), buf) + query::execute(&self.inner, buf).await } /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. @@ -260,7 +260,7 @@ impl Client { /// /// Panics if the number of parameters provided does not match the number expected. pub fn copy_in( - &mut self, + &self, statement: &Statement, params: &[&(dyn ToSql + Sync)], stream: S, @@ -281,7 +281,7 @@ impl Client { /// /// Panics if the number of parameters provided does not match the number expected. pub fn copy_out( - &mut self, + &self, statement: &Statement, params: &[&(dyn ToSql + Sync)], ) -> impl Stream> { @@ -303,7 +303,7 @@ impl Client { /// functionality to safely embed that data in the request. 
Do not form statements via string concatenation and pass /// them to this method! pub fn simple_query( - &mut self, + &self, query: &str, ) -> impl Stream> { simple_query::simple_query(self.inner(), query) @@ -319,7 +319,7 @@ impl Client { /// Prepared statements should be use for any query which contains user-specified data, as they provided the /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! - pub fn batch_execute(&mut self, query: &str) -> impl Future> { + pub fn batch_execute(&self, query: &str) -> impl Future> { simple_query::batch_execute(self.inner(), query) } @@ -338,7 +338,7 @@ impl Client { /// /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] - pub fn cancel_query(&mut self, tls: T) -> impl Future> + pub fn cancel_query(&self, tls: T) -> impl Future> where T: MakeTlsConnect, { @@ -354,7 +354,7 @@ impl Client { /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new /// connection itself. pub fn cancel_query_raw( - &mut self, + &self, stream: S, tls: T, ) -> impl Future> diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 023c4ad35..b2b120d22 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -79,14 +79,14 @@ //! use std::future::Future; //! use tokio_postgres::{Client, Error, Statement}; //! -//! fn pipelined_prepare( -//! client: &mut Client, -//! ) -> impl Future> +//! async fn pipelined_prepare( +//! client: &Client, +//! ) -> Result<(Statement, Statement), Error> //! { //! future::try_join( //! client.prepare("SELECT * FROM foo"), //! client.prepare("INSERT INTO bar (id, name) VALUES ($1, $2)") -//! ) +//! ).await //! } //! ``` //! diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index c3f70c41f..48f1f34eb 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -57,15 +57,14 @@ ORDER BY attnum static NEXT_ID: AtomicUsize = AtomicUsize::new(0); -pub fn prepare( - client: Arc, +pub async fn prepare( + client: &Arc, query: &str, types: &[Type], -) -> impl Future> + 'static { +) -> Result { let name = format!("s{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); let buf = encode(&name, query, types); - async move { let buf = buf?; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; @@ -103,14 +102,13 @@ pub fn prepare( } Ok(Statement::new(&client, name, parameters, columns)) - } } -fn prepare_rec( - client: Arc, - query: &str, - types: &[Type], -) -> Pin> + 'static + Send>> { +fn prepare_rec<'a>( + client: &'a Arc, + query: &'a str, + types: &'a [Type], +) -> Pin> + 'a + Send>> { Box::pin(prepare(client, query, types)) } @@ -135,7 +133,7 @@ async fn get_type(client: &Arc, oid: Oid) -> Result { let stmt = typeinfo_statement(client).await?; let buf = query::encode(&stmt, (&[&oid as &dyn ToSql]).iter().cloned()); - let rows = query::query(client.clone(), stmt, buf); + let rows = query::query(client, &stmt, buf); pin_mut!(rows); let row = match rows.try_next().await? { @@ -190,10 +188,10 @@ async fn typeinfo_statement(client: &Arc) -> Result stmt, Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_TABLE) => { - prepare_rec(client.clone(), TYPEINFO_FALLBACK_QUERY, &[]).await? + prepare_rec(client, TYPEINFO_FALLBACK_QUERY, &[]).await? 
} Err(e) => return Err(e), }; @@ -206,7 +204,7 @@ async fn get_enum_variants(client: &Arc, oid: Oid) -> Result) -> Result stmt, Err(ref e) if e.code() == Some(&SqlState::UNDEFINED_COLUMN) => { - prepare_rec(client.clone(), TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await? + prepare_rec(client, TYPEINFO_ENUM_FALLBACK_QUERY, &[]).await? } Err(e) => return Err(e), }; @@ -233,7 +231,7 @@ async fn get_composite_fields(client: &Arc, oid: Oid) -> Result>() .await?; @@ -253,7 +251,7 @@ async fn typeinfo_composite_statement(client: &Arc) -> Result, - statement: Statement, +pub fn query<'a>( + client: &'a Arc, + statement: &'a Statement, buf: Result, Error>, -) -> impl Stream> { - start(client, buf) - .map_ok(|responses| Query { - statement, - responses, - }) - .try_flatten_stream() +) -> impl Stream> + 'a { + let f = async move { + let responses = start(client, buf).await?; + Ok(Query { statement: statement.clone(), responses }) + }; + f.try_flatten_stream() } pub fn query_portal( @@ -44,7 +43,7 @@ pub fn query_portal( start.try_flatten_stream() } -pub async fn execute(client: Arc, buf: Result, Error>) -> Result { +pub async fn execute(client: &InnerClient, buf: Result, Error>) -> Result { let mut responses = start(client, buf).await?; loop { @@ -67,7 +66,7 @@ pub async fn execute(client: Arc, buf: Result, Error>) -> R } } -async fn start(client: Arc, buf: Result, Error>) -> Result { +async fn start(client: &InnerClient, buf: Result, Error>) -> Result { let buf = buf?; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 64b86abfb..17477ed0e 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -78,65 +78,61 @@ impl<'a> Transaction<'a> { } /// Like `Client::prepare`. - pub fn prepare(&mut self, query: &str) -> impl Future> { - self.client.prepare(query) + pub async fn prepare(&self, query: &str) -> Result { + self.client.prepare(query).await } /// Like `Client::prepare_typed`. - pub fn prepare_typed( - &mut self, + pub async fn prepare_typed( + &self, query: &str, parameter_types: &[Type], - ) -> impl Future> { - self.client.prepare_typed(query, parameter_types) + ) -> Result { + self.client.prepare_typed(query, parameter_types).await } /// Like `Client::query`. - pub fn query( - &mut self, - statement: &Statement, - params: &[&(dyn ToSql + Sync)], - ) -> impl Stream> { + pub fn query<'b>( + &'b self, + statement: &'b Statement, + params: &'b [&'b (dyn ToSql + Sync)], + ) -> impl Stream> + 'b { self.client.query(statement, params) } /// Like `Client::query_iter`. pub fn query_iter<'b, I>( - &mut self, - statement: &Statement, + &'b self, + statement: &'b Statement, params: I, - ) -> impl Stream> + 'static + ) -> impl Stream> + 'b where - I: IntoIterator, + I: IntoIterator + 'b, I::IntoIter: ExactSizeIterator, { - // https://github.com/rust-lang/rust/issues/63032 - let buf = query::encode(statement, params); - query::query(self.client.inner(), statement.clone(), buf) + self.client.query_iter(statement, params) } /// Like `Client::execute`. - pub fn execute( - &mut self, + pub async fn execute( + &self, statement: &Statement, params: &[&(dyn ToSql + Sync)], - ) -> impl Future> { - self.client.execute(statement, params) + ) -> Result { + self.client.execute(statement, params).await } /// Like `Client::execute_iter`. 
- pub fn execute_iter<'b, I>( - &mut self, + pub async fn execute_iter<'b, I>( + &self, statement: &Statement, params: I, - ) -> impl Future> + ) -> Result where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - // https://github.com/rust-lang/rust/issues/63032 - let buf = query::encode(statement, params); - query::execute(self.client.inner(), buf) + self.client.execute_iter(statement, params).await } /// Binds a statement to a set of parameters, creating a `Portal` which can be incrementally queried. @@ -148,7 +144,7 @@ impl<'a> Transaction<'a> { /// /// Panics if the number of parameters provided does not match the number expected. pub fn bind( - &mut self, + &self, statement: &Statement, params: &[&(dyn ToSql + Sync)], ) -> impl Future> { @@ -161,7 +157,7 @@ impl<'a> Transaction<'a> { /// /// [`bind`]: #method.bind pub fn bind_iter<'b, I>( - &mut self, + &self, statement: &Statement, params: I, ) -> impl Future> @@ -178,7 +174,7 @@ impl<'a> Transaction<'a> { /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// `query_portal`. If the requested number is negative or 0, all rows will be returned. pub fn query_portal( - &mut self, + &self, portal: &Portal, max_rows: i32, ) -> impl Stream> { @@ -187,7 +183,7 @@ impl<'a> Transaction<'a> { /// Like `Client::copy_in`. pub fn copy_in( - &mut self, + &self, statement: &Statement, params: &[&(dyn ToSql + Sync)], stream: S, @@ -203,7 +199,7 @@ impl<'a> Transaction<'a> { /// Like `Client::copy_out`. pub fn copy_out( - &mut self, + &self, statement: &Statement, params: &[&(dyn ToSql + Sync)], ) -> impl Stream> { @@ -212,20 +208,20 @@ impl<'a> Transaction<'a> { /// Like `Client::simple_query`. pub fn simple_query( - &mut self, + &self, query: &str, ) -> impl Stream> { self.client.simple_query(query) } /// Like `Client::batch_execute`. - pub fn batch_execute(&mut self, query: &str) -> impl Future> { + pub fn batch_execute(&self, query: &str) -> impl Future> { self.client.batch_execute(query) } /// Like `Client::cancel_query`. #[cfg(feature = "runtime")] - pub fn cancel_query(&mut self, tls: T) -> impl Future> + pub fn cancel_query(&self, tls: T) -> impl Future> where T: MakeTlsConnect, { @@ -234,7 +230,7 @@ impl<'a> Transaction<'a> { /// Like `Client::cancel_query_raw`. 
pub fn cancel_query_raw( - &mut self, + &self, stream: S, tls: T, ) -> impl Future> diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 7311ea28b..6f5f054c9 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -671,9 +671,11 @@ async fn check_send() { let f = client.query(&stmt, &[&"hello"]); is_send(&f); + drop(f); let f = client.execute(&stmt, &[&"hello"]); is_send(&f); + drop(f); let f = client.transaction(); is_send(&f); @@ -681,7 +683,9 @@ async fn check_send() { let f = trans.query(&stmt, &[&"hello"]); is_send(&f); + drop(f); let f = trans.execute(&stmt, &[&"hello"]); is_send(&f); + drop(f); } From 0d2d554122ed4492a658c53c67b6f4deefb1b2df Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 28 Sep 2019 14:42:00 -0400 Subject: [PATCH 259/819] Add a ToStatement trait in tokio-postgres --- postgres-native-tls/src/lib.rs | 2 +- postgres-openssl/src/lib.rs | 2 +- postgres/src/client.rs | 22 ++--- postgres/src/lib.rs | 6 +- postgres/src/to_statement.rs | 59 ------------- postgres/src/transaction.rs | 41 ++++----- tokio-postgres/src/bind.rs | 37 +++----- tokio-postgres/src/client.rs | 113 ++++++++++++++----------- tokio-postgres/src/copy_in.rs | 15 ++-- tokio-postgres/src/copy_out.rs | 29 ++++--- tokio-postgres/src/lib.rs | 11 ++- tokio-postgres/src/prepare.rs | 78 +++++++++-------- tokio-postgres/src/query.rs | 43 +++++++--- tokio-postgres/src/simple_query.rs | 47 ++++------ tokio-postgres/src/to_statement.rs | 49 +++++++++++ tokio-postgres/src/transaction.rs | 113 +++++++++++++------------ tokio-postgres/tests/test/main.rs | 69 +++++++++++---- tokio-postgres/tests/test/runtime.rs | 4 +- tokio-postgres/tests/test/types/mod.rs | 73 ++++------------ 19 files changed, 408 insertions(+), 405 deletions(-) delete mode 100644 postgres/src/to_statement.rs create mode 100644 tokio-postgres/src/to_statement.rs diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index 7b45e7f69..a79fbebd7 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -38,7 +38,7 @@ //! .build()?; //! let connector = MakeTlsConnector::new(connector); //! -//! let mut client = postgres::Client::connect( +//! let client = postgres::Client::connect( //! "host=localhost user=postgres sslmode=require", //! connector, //! )?; diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 94b0fe50f..78f4620e1 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -30,7 +30,7 @@ //! builder.set_ca_file("database_cert.pem")?; //! let connector = MakeTlsConnector::new(builder.build()); //! -//! let mut client = postgres::Client::connect( +//! let client = postgres::Client::connect( //! "host=localhost user=postgres sslmode=require", //! connector, //! )?; diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 4cf088a7c..a4157b3ec 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -84,8 +84,7 @@ impl Client { where T: ?Sized + ToStatement, { - let statement = query.__statement(self)?; - executor::block_on(self.0.execute(&statement, params)) + executor::block_on(self.0.execute(query, params)) } /// Executes a statement, returning the resulting rows. 
@@ -154,14 +153,13 @@ impl Client { /// ``` pub fn query_iter<'a, T>( &'a mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], + query: &'a T, + params: &'a [&(dyn ToSql + Sync)], ) -> Result + 'a, Error> where T: ?Sized + ToStatement, { - let statement = query.__statement(self)?; - Ok(Iter::new(self.0.query(&statement, params))) + Ok(Iter::new(self.0.query(query, params))) } /// Creates a new prepared statement. @@ -249,8 +247,7 @@ impl Client { T: ?Sized + ToStatement, R: Read + Unpin, { - let statement = query.__statement(self)?; - executor::block_on(self.0.copy_in(&statement, params, CopyInStream(reader))) + executor::block_on(self.0.copy_in(query, params, CopyInStream(reader))) } /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data. @@ -274,14 +271,13 @@ impl Client { /// ``` pub fn copy_out<'a, T>( &'a mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], + query: &'a T, + params: &'a [&(dyn ToSql + Sync)], ) -> Result where T: ?Sized + ToStatement, { - let statement = query.__statement(self)?; - let stream = self.0.copy_out(&statement, params); + let stream = self.0.copy_out(query, params); CopyOutReader::new(stream) } @@ -314,7 +310,7 @@ impl Client { /// them to this method! pub fn simple_query_iter<'a>( &'a mut self, - query: &str, + query: &'a str, ) -> Result + 'a, Error> { Ok(Iter::new(self.0.simple_query(query))) } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index cf30fd26a..ae52f2a34 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -62,7 +62,9 @@ use tokio::runtime::{self, Runtime}; #[cfg(feature = "runtime")] pub use tokio_postgres::Socket; -pub use tokio_postgres::{error, row, tls, types, Column, Portal, SimpleQueryMessage, Statement}; +pub use tokio_postgres::{ + error, row, tls, types, Column, Portal, SimpleQueryMessage, Statement, ToStatement, +}; pub use crate::client::*; #[cfg(feature = "runtime")] @@ -73,7 +75,6 @@ pub use crate::error::Error; pub use crate::row::{Row, SimpleQueryRow}; #[doc(no_inline)] pub use crate::tls::NoTls; -pub use crate::to_statement::*; pub use crate::transaction::*; mod client; @@ -82,7 +83,6 @@ pub mod config; mod copy_in_stream; mod copy_out_reader; mod iter; -mod to_statement; mod transaction; #[cfg(feature = "runtime")] diff --git a/postgres/src/to_statement.rs b/postgres/src/to_statement.rs deleted file mode 100644 index a77ad28ac..000000000 --- a/postgres/src/to_statement.rs +++ /dev/null @@ -1,59 +0,0 @@ -use tokio_postgres::Error; - -use crate::{Client, Statement, Transaction}; - -mod sealed { - pub trait Sealed {} -} - -#[doc(hidden)] -pub trait Prepare { - fn prepare(&mut self, query: &str) -> Result; -} - -impl Prepare for Client { - fn prepare(&mut self, query: &str) -> Result { - self.prepare(query) - } -} - -impl<'a> Prepare for Transaction<'a> { - fn prepare(&mut self, query: &str) -> Result { - self.prepare(query) - } -} - -/// A trait abstracting over prepared and unprepared statements. -/// -/// Many methods are generic over this bound, so that they support both a raw query string as well as a statement which -/// was prepared previously. -/// -/// This trait is "sealed" and cannot be implemented by anything outside this crate. 
-pub trait ToStatement: sealed::Sealed { - #[doc(hidden)] - fn __statement(&self, client: &mut T) -> Result - where - T: Prepare; -} - -impl sealed::Sealed for str {} - -impl ToStatement for str { - fn __statement(&self, client: &mut T) -> Result - where - T: Prepare, - { - client.prepare(self) - } -} - -impl sealed::Sealed for Statement {} - -impl ToStatement for Statement { - fn __statement(&self, _: &mut T) -> Result - where - T: Prepare, - { - Ok(self.clone()) - } -} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 9ecbe75f6..ac30369e5 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -47,8 +47,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let statement = query.__statement(self)?; - executor::block_on(self.0.execute(&statement, params)) + executor::block_on(self.0.execute(query, params)) } /// Like `Client::query`. @@ -60,16 +59,15 @@ impl<'a> Transaction<'a> { } /// Like `Client::query_iter`. - pub fn query_iter( - &mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> + pub fn query_iter<'b, T>( + &'b mut self, + query: &'b T, + params: &'b [&(dyn ToSql + Sync)], + ) -> Result + 'b, Error> where T: ?Sized + ToStatement, { - let statement = query.__statement(self)?; - Ok(Iter::new(self.0.query(&statement, params))) + Ok(Iter::new(self.0.query(query, params))) } /// Binds parameters to a statement, creating a "portal". @@ -86,8 +84,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let statement = query.__statement(self)?; - executor::block_on(self.0.bind(&statement, params)) + executor::block_on(self.0.bind(query, params)) } /// Continues execution of a portal, returning the next set of rows. @@ -100,11 +97,11 @@ impl<'a> Transaction<'a> { /// Like `query_portal`, except that it returns a fallible iterator over the resulting rows rather than buffering /// the entire response in memory. - pub fn query_portal_iter( - &mut self, - portal: &Portal, + pub fn query_portal_iter<'b>( + &'b mut self, + portal: &'b Portal, max_rows: i32, - ) -> Result, Error> { + ) -> Result + 'b, Error> { Ok(Iter::new(self.0.query_portal(&portal, max_rows))) } @@ -119,21 +116,19 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, R: Read + Unpin, { - let statement = query.__statement(self)?; - executor::block_on(self.0.copy_in(&statement, params, CopyInStream(reader))) + executor::block_on(self.0.copy_in(query, params, CopyInStream(reader))) } /// Like `Client::copy_out`. pub fn copy_out<'b, T>( - &'a mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], + &'b mut self, + query: &'b T, + params: &'b [&(dyn ToSql + Sync)], ) -> Result where T: ?Sized + ToStatement, { - let statement = query.__statement(self)?; - let stream = self.0.copy_out(&statement, params); + let stream = self.0.copy_out(query, params); CopyOutReader::new(stream) } @@ -145,7 +140,7 @@ impl<'a> Transaction<'a> { /// Like `Client::simple_query_iter`. 
pub fn simple_query_iter<'b>( &'b mut self, - query: &str, + query: &'b str, ) -> Result + 'b, Error> { Ok(Iter::new(self.0.simple_query(query))) } diff --git a/tokio-postgres/src/bind.rs b/tokio-postgres/src/bind.rs index 997190a9d..018663c42 100644 --- a/tokio-postgres/src/bind.rs +++ b/tokio-postgres/src/bind.rs @@ -10,36 +10,25 @@ use std::sync::Arc; static NEXT_ID: AtomicUsize = AtomicUsize::new(0); -pub async fn bind( - client: Arc, +pub async fn bind<'a, I>( + client: &Arc, statement: Statement, - bind: Result, -) -> Result { - let bind = bind?; - - let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(bind.buf)))?; - - match responses.next().await? { - Message::BindComplete => {} - _ => return Err(Error::unexpected_message()), - } - - Ok(Portal::new(&client, bind.name, statement)) -} - -pub struct PendingBind { - buf: Vec, - name: String, -} - -pub fn encode<'a, I>(statement: &Statement, params: I) -> Result + params: I, +) -> Result where I: IntoIterator, I::IntoIter: ExactSizeIterator, { let name = format!("p{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); - let mut buf = query::encode_bind(statement, params, &name)?; + let mut buf = query::encode_bind(&statement, params, &name)?; frontend::sync(&mut buf); - Ok(PendingBind { buf, name }) + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + match responses.next().await? { + Message::BindComplete => {} + _ => return Err(Error::unexpected_message()), + } + + Ok(Portal::new(client, name, statement)) } diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 356da01a9..29d4a331f 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -3,9 +3,11 @@ use crate::cancel_query; use crate::codec::BackendMessages; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; +use crate::slice_iter; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; use crate::tls::TlsConnect; +use crate::to_statement::ToStatement; use crate::types::{Oid, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; @@ -16,13 +18,12 @@ use crate::{Error, Statement}; use bytes::{Bytes, IntoBuf}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::{future, Stream, TryStream}; +use futures::{future, Stream, TryFutureExt, TryStream}; use futures::{ready, StreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; use std::collections::HashMap; use std::error; -use std::future::Future; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -160,8 +161,8 @@ impl Client { } } - pub(crate) fn inner(&self) -> Arc { - self.inner.clone() + pub(crate) fn inner(&self) -> &Arc { + &self.inner } #[cfg(feature = "runtime")] @@ -194,29 +195,35 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn query<'a>( + pub fn query<'a, T>( &'a self, - statement: &'a Statement, - params: &'a [&'a (dyn ToSql + Sync)], - ) -> impl Stream> + 'a { - let buf = query::encode(statement, params.iter().map(|s| *s as _)); - query::query(&self.inner, statement, buf) + statement: &'a T, + params: &'a [&(dyn ToSql + Sync)], + ) -> impl Stream> + 'a + where + T: ?Sized + ToStatement, + { + self.query_iter(statement, slice_iter(params)) } /// Like [`query`], but takes an iterator of parameters rather than a slice. 
/// /// [`query`]: #method.query - pub fn query_iter<'a, I>( + pub fn query_iter<'a, T, I>( &'a self, - statement: &'a Statement, + statement: &'a T, params: I, ) -> impl Stream> + 'a where + T: ?Sized + ToStatement, I: IntoIterator + 'a, I::IntoIter: ExactSizeIterator, { - let buf = query::encode(statement, params); - query::query(&self.inner, statement, buf) + let f = async move { + let statement = statement.__convert().into_statement(self).await?; + Ok(query::query(&self.inner, statement, params)) + }; + f.try_flatten_stream() } /// Executes a statement, returning the number of rows modified. @@ -226,29 +233,28 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub async fn execute( + pub async fn execute( &self, - statement: &Statement, + statement: &T, params: &[&(dyn ToSql + Sync)], - ) -> Result { - let buf = query::encode(statement, params.iter().map(|s| *s as _)); - query::execute(&self.inner, buf).await + ) -> Result + where + T: ?Sized + ToStatement, + { + self.execute_iter(statement, slice_iter(params)).await } /// Like [`execute`], but takes an iterator of parameters rather than a slice. /// /// [`execute`]: #method.execute - pub async fn execute_iter<'a, I>( - &self, - statement: &Statement, - params: I, - ) -> Result + pub async fn execute_iter<'a, T, I>(&self, statement: &T, params: I) -> Result where + T: ?Sized + ToStatement, I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = query::encode(statement, params); - query::execute(&self.inner, buf).await + let statement = statement.__convert().into_statement(self).await?; + query::execute(self.inner(), statement, params).await } /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. @@ -259,20 +265,22 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn copy_in( + pub async fn copy_in( &self, - statement: &Statement, + statement: &T, params: &[&(dyn ToSql + Sync)], stream: S, - ) -> impl Future> + ) -> Result where + T: ?Sized + ToStatement, S: TryStream, S::Ok: IntoBuf, ::Buf: 'static + Send, S::Error: Into>, { - let buf = query::encode(statement, params.iter().map(|s| *s as _)); - copy_in::copy_in(self.inner(), buf, stream) + let statement = statement.__convert().into_statement(self).await?; + let params = slice_iter(params); + copy_in::copy_in(self.inner(), statement, params, stream).await } /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. @@ -280,13 +288,20 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn copy_out( - &self, - statement: &Statement, - params: &[&(dyn ToSql + Sync)], - ) -> impl Stream> { - let buf = query::encode(statement, params.iter().map(|s| *s as _)); - copy_out::copy_out(self.inner(), buf) + pub fn copy_out<'a, T>( + &'a self, + statement: &'a T, + params: &'a [&(dyn ToSql + Sync)], + ) -> impl Stream> + 'a + where + T: ?Sized + ToStatement, + { + let f = async move { + let statement = statement.__convert().into_statement(self).await?; + let params = slice_iter(params); + Ok(copy_out::copy_out(self.inner(), statement, params)) + }; + f.try_flatten_stream() } /// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows. 
@@ -302,10 +317,10 @@ impl Client { /// Prepared statements should be use for any query which contains user-specified data, as they provided the /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! - pub fn simple_query( - &self, - query: &str, - ) -> impl Stream> { + pub fn simple_query<'a>( + &'a self, + query: &'a str, + ) -> impl Stream> + 'a { simple_query::simple_query(self.inner(), query) } @@ -319,8 +334,8 @@ impl Client { /// Prepared statements should be use for any query which contains user-specified data, as they provided the /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! - pub fn batch_execute(&self, query: &str) -> impl Future> { - simple_query::batch_execute(self.inner(), query) + pub async fn batch_execute(&self, query: &str) -> Result<(), Error> { + simple_query::batch_execute(self.inner(), query).await } /// Begins a new database transaction. @@ -338,7 +353,7 @@ impl Client { /// /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] - pub fn cancel_query(&self, tls: T) -> impl Future> + pub async fn cancel_query(&self, tls: T) -> Result<(), Error> where T: MakeTlsConnect, { @@ -349,15 +364,12 @@ impl Client { self.process_id, self.secret_key, ) + .await } /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new /// connection itself. - pub fn cancel_query_raw( - &self, - stream: S, - tls: T, - ) -> impl Future> + pub async fn cancel_query_raw(&self, stream: S, tls: T) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, @@ -369,6 +381,7 @@ impl Client { self.process_id, self.secret_key, ) + .await } /// Determines if the connection to the server has already closed. 
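For illustration, a minimal sketch (not part of the patch) of the call sites the `ToStatement`-generic `Client` API above enables, assuming an async context, a connected `client`, and a hypothetical `foo (name TEXT)` table; the same `execute` call now accepts either a raw SQL string or a previously prepared `Statement`:

```rust
use tokio_postgres::{Client, Error};

async fn demo(client: &Client) -> Result<(), Error> {
    // Unprepared: the &str is converted through ToStatement and prepared on the fly.
    client
        .execute("INSERT INTO foo (name) VALUES ($1)", &[&"alice"])
        .await?;

    // Prepared: the very same method takes a Statement without re-preparing it.
    let stmt = client.prepare("INSERT INTO foo (name) VALUES ($1)").await?;
    client.execute(&stmt, &[&"bob"]).await?;

    Ok(())
}
```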
diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index c40df728a..239f16663 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -1,7 +1,8 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::Error; +use crate::types::ToSql; +use crate::{query, Error, Statement}; use bytes::{Buf, BufMut, BytesMut, IntoBuf}; use futures::channel::mpsc; use futures::ready; @@ -12,7 +13,6 @@ use postgres_protocol::message::frontend; use postgres_protocol::message::frontend::CopyData; use std::error; use std::pin::Pin; -use std::sync::Arc; use std::task::{Context, Poll}; enum CopyInMessage { @@ -62,18 +62,21 @@ impl Stream for CopyInReceiver { } } -pub async fn copy_in( - client: Arc, - buf: Result, Error>, +pub async fn copy_in<'a, I, S>( + client: &InnerClient, + statement: Statement, + params: I, stream: S, ) -> Result where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, S: TryStream, S::Ok: IntoBuf, ::Buf: 'static + Send, S::Error: Into>, { - let buf = buf?; + let buf = query::encode(&statement, params)?; let (mut sender, receiver) = mpsc::channel(1); let receiver = CopyInReceiver::new(receiver); diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 670c08f29..30c85ee71 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -1,25 +1,32 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::Error; +use crate::types::ToSql; +use crate::{query, Error, Statement}; use bytes::Bytes; use futures::{ready, Stream, TryFutureExt}; use postgres_protocol::message::backend::Message; use std::pin::Pin; -use std::sync::Arc; use std::task::{Context, Poll}; -pub fn copy_out( - client: Arc, - buf: Result, Error>, -) -> impl Stream> { - start(client, buf) - .map_ok(|responses| CopyOut { responses }) - .try_flatten_stream() +pub fn copy_out<'a, I>( + client: &'a InnerClient, + statement: Statement, + params: I, +) -> impl Stream> + 'a +where + I: IntoIterator + 'a, + I::IntoIter: ExactSizeIterator, +{ + let f = async move { + let buf = query::encode(&statement, params)?; + let responses = start(client, buf).await?; + Ok(CopyOut { responses }) + }; + f.try_flatten_stream() } -async fn start(client: Arc, buf: Result, Error>) -> Result { - let buf = buf?; +async fn start(client: &InnerClient, buf: Vec) -> Result { let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; match responses.next().await? 
{ diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index b2b120d22..0a3fa6aef 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -114,11 +114,13 @@ pub use crate::portal::Portal; pub use crate::row::{Row, SimpleQueryRow}; #[cfg(feature = "runtime")] pub use crate::socket::Socket; +pub use crate::statement::{Column, Statement}; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; +pub use crate::to_statement::ToStatement; pub use crate::transaction::Transaction; -pub use statement::{Column, Statement}; +use crate::types::ToSql; mod bind; #[cfg(feature = "runtime")] @@ -147,6 +149,7 @@ mod simple_query; mod socket; mod statement; pub mod tls; +mod to_statement; mod transaction; pub mod types; @@ -220,3 +223,9 @@ pub enum SimpleQueryMessage { #[doc(hidden)] __NonExhaustive, } + +fn slice_iter<'a>( + s: &'a [&'a (dyn ToSql + Sync)], +) -> impl ExactSizeIterator + 'a { + s.iter().map(|s| *s as _) +} diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 48f1f34eb..f3c187124 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -2,8 +2,8 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::error::SqlState; -use crate::query; -use crate::types::{Field, Kind, Oid, ToSql, Type}; +use crate::types::{Field, Kind, Oid, Type}; +use crate::{query, slice_iter}; use crate::{Column, Error, Statement}; use fallible_iterator::FallibleIterator; use futures::{future, TryStreamExt}; @@ -65,43 +65,43 @@ pub async fn prepare( let name = format!("s{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); let buf = encode(&name, query, types); - let buf = buf?; - let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + let buf = buf?; + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - match responses.next().await? { - Message::ParseComplete => {} - _ => return Err(Error::unexpected_message()), - } + match responses.next().await? { + Message::ParseComplete => {} + _ => return Err(Error::unexpected_message()), + } - let parameter_description = match responses.next().await? { - Message::ParameterDescription(body) => body, - _ => return Err(Error::unexpected_message()), - }; - - let row_description = match responses.next().await? { - Message::RowDescription(body) => Some(body), - Message::NoData => None, - _ => return Err(Error::unexpected_message()), - }; - - let mut parameters = vec![]; - let mut it = parameter_description.parameters(); - while let Some(oid) = it.next().map_err(Error::parse)? { - let type_ = get_type(&client, oid).await?; - parameters.push(type_); - } + let parameter_description = match responses.next().await? { + Message::ParameterDescription(body) => body, + _ => return Err(Error::unexpected_message()), + }; - let mut columns = vec![]; - if let Some(row_description) = row_description { - let mut it = row_description.fields(); - while let Some(field) = it.next().map_err(Error::parse)? { - let type_ = get_type(&client, field.type_oid()).await?; - let column = Column::new(field.name().to_string(), type_); - columns.push(column); - } + let row_description = match responses.next().await? { + Message::RowDescription(body) => Some(body), + Message::NoData => None, + _ => return Err(Error::unexpected_message()), + }; + + let mut parameters = vec![]; + let mut it = parameter_description.parameters(); + while let Some(oid) = it.next().map_err(Error::parse)? 
{ + let type_ = get_type(&client, oid).await?; + parameters.push(type_); + } + + let mut columns = vec![]; + if let Some(row_description) = row_description { + let mut it = row_description.fields(); + while let Some(field) = it.next().map_err(Error::parse)? { + let type_ = get_type(&client, field.type_oid()).await?; + let column = Column::new(field.name().to_string(), type_); + columns.push(column); } + } - Ok(Statement::new(&client, name, parameters, columns)) + Ok(Statement::new(&client, name, parameters, columns)) } fn prepare_rec<'a>( @@ -132,8 +132,8 @@ async fn get_type(client: &Arc, oid: Oid) -> Result { let stmt = typeinfo_statement(client).await?; - let buf = query::encode(&stmt, (&[&oid as &dyn ToSql]).iter().cloned()); - let rows = query::query(client, &stmt, buf); + let params = &[&oid as _]; + let rows = query::query(client, stmt, slice_iter(params)); pin_mut!(rows); let row = match rows.try_next().await? { @@ -203,8 +203,7 @@ async fn typeinfo_statement(client: &Arc) -> Result, oid: Oid) -> Result, Error> { let stmt = typeinfo_enum_statement(client).await?; - let buf = query::encode(&stmt, (&[&oid as &dyn ToSql]).iter().cloned()); - query::query(client, &stmt, buf) + query::query(client, stmt, slice_iter(&[&oid])) .and_then(|row| future::ready(row.try_get(0))) .try_collect() .await @@ -230,8 +229,7 @@ async fn typeinfo_enum_statement(client: &Arc) -> Result, oid: Oid) -> Result, Error> { let stmt = typeinfo_composite_statement(client).await?; - let buf = query::encode(&stmt, (&[&oid as &dyn ToSql]).iter().cloned()); - let rows = query::query(client, &stmt, buf) + let rows = query::query(client, stmt, slice_iter(&[&oid])) .try_collect::>() .await?; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index e048979a8..ee04866b6 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -7,26 +7,33 @@ use futures::{ready, Stream, TryFutureExt}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::pin::Pin; -use std::sync::Arc; use std::task::{Context, Poll}; -pub fn query<'a>( - client: &'a Arc, - statement: &'a Statement, - buf: Result, Error>, -) -> impl Stream> + 'a { +pub fn query<'a, I>( + client: &'a InnerClient, + statement: Statement, + params: I, +) -> impl Stream> + 'a +where + I: IntoIterator + 'a, + I::IntoIter: ExactSizeIterator, +{ let f = async move { + let buf = encode(&statement, params)?; let responses = start(client, buf).await?; - Ok(Query { statement: statement.clone(), responses }) + Ok(Query { + statement, + responses, + }) }; f.try_flatten_stream() } -pub fn query_portal( - client: Arc, - portal: Portal, +pub fn query_portal<'a>( + client: &'a InnerClient, + portal: &'a Portal, max_rows: i32, -) -> impl Stream> { +) -> impl Stream> + 'a { let start = async move { let mut buf = vec![]; frontend::execute(portal.name(), max_rows, &mut buf).map_err(Error::encode)?; @@ -43,7 +50,16 @@ pub fn query_portal( start.try_flatten_stream() } -pub async fn execute(client: &InnerClient, buf: Result, Error>) -> Result { +pub async fn execute<'a, I>( + client: &InnerClient, + statement: Statement, + params: I, +) -> Result +where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + let buf = encode(&statement, params)?; let mut responses = start(client, buf).await?; loop { @@ -66,8 +82,7 @@ pub async fn execute(client: &InnerClient, buf: Result, Error>) -> Resul } } -async fn start(client: &InnerClient, buf: Result, Error>) -> Result { - let buf = buf?; +async fn start(client: 
&InnerClient, buf: Vec) -> Result { let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; match responses.next().await? { diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 383dad0ca..d1e3bfd2c 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -6,19 +6,16 @@ use fallible_iterator::FallibleIterator; use futures::{ready, Stream, TryFutureExt}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; -use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -pub fn simple_query( - client: Arc, - query: &str, -) -> impl Stream> { - let buf = encode(query); - - let start = async move { - let buf = buf?; +pub fn simple_query<'a>( + client: &'a InnerClient, + query: &'a str, +) -> impl Stream> + 'a { + let f = async move { + let buf = encode(query)?; let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; Ok(SimpleQuery { @@ -26,29 +23,21 @@ pub fn simple_query( columns: None, }) }; - - start.try_flatten_stream() + f.try_flatten_stream() } -pub fn batch_execute( - client: Arc, - query: &str, -) -> impl Future> { - let buf = encode(query); - - async move { - let buf = buf?; - let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; +pub async fn batch_execute(client: &InnerClient, query: &str) -> Result<(), Error> { + let buf = encode(query)?; + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - loop { - match responses.next().await? { - Message::ReadyForQuery(_) => return Ok(()), - Message::CommandComplete(_) - | Message::EmptyQueryResponse - | Message::RowDescription(_) - | Message::DataRow(_) => {} - _ => return Err(Error::unexpected_message()), - } + loop { + match responses.next().await? { + Message::ReadyForQuery(_) => return Ok(()), + Message::CommandComplete(_) + | Message::EmptyQueryResponse + | Message::RowDescription(_) + | Message::DataRow(_) => {} + _ => return Err(Error::unexpected_message()), } } } diff --git a/tokio-postgres/src/to_statement.rs b/tokio-postgres/src/to_statement.rs new file mode 100644 index 000000000..3b4026e56 --- /dev/null +++ b/tokio-postgres/src/to_statement.rs @@ -0,0 +1,49 @@ +use crate::to_statement::private::{Sealed, ToStatementType}; +use crate::Statement; + +mod private { + use crate::{Client, Error, Statement}; + + pub trait Sealed {} + + pub enum ToStatementType<'a> { + Statement(&'a Statement), + Query(&'a str), + } + + impl<'a> ToStatementType<'a> { + pub async fn into_statement(self, client: &Client) -> Result { + match self { + ToStatementType::Statement(s) => Ok(s.clone()), + ToStatementType::Query(s) => client.prepare(s).await, + } + } + } +} + +/// A trait abstracting over prepared and unprepared statements. +/// +/// Many methods are generic over this bound, so that they support both a raw query string as well as a statement which +/// was prepared previously. +/// +/// This trait is "sealed" and cannot be implemented by anything outside this crate. 
+pub trait ToStatement: private::Sealed { + #[doc(hidden)] + fn __convert(&self) -> ToStatementType<'_>; +} + +impl ToStatement for Statement { + fn __convert(&self) -> ToStatementType<'_> { + ToStatementType::Statement(self) + } +} + +impl Sealed for Statement {} + +impl ToStatement for str { + fn __convert(&self) -> ToStatementType<'_> { + ToStatementType::Query(self) + } +} + +impl Sealed for str {} diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 17477ed0e..5f9dc8fd8 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -6,12 +6,13 @@ use crate::tls::TlsConnect; use crate::types::{ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; -use crate::{bind, query, Client, Error, Portal, Row, SimpleQueryMessage, Statement}; +use crate::{ + bind, query, slice_iter, Client, Error, Portal, Row, SimpleQueryMessage, Statement, ToStatement, +}; use bytes::{Bytes, IntoBuf}; use futures::{Stream, TryStream}; use postgres_protocol::message::frontend; use std::error; -use std::future::Future; use tokio::io::{AsyncRead, AsyncWrite}; /// A representation of a PostgreSQL database transaction. @@ -92,21 +93,25 @@ impl<'a> Transaction<'a> { } /// Like `Client::query`. - pub fn query<'b>( + pub fn query<'b, T>( &'b self, - statement: &'b Statement, - params: &'b [&'b (dyn ToSql + Sync)], - ) -> impl Stream> + 'b { + statement: &'b T, + params: &'b [&(dyn ToSql + Sync)], + ) -> impl Stream> + 'b + where + T: ?Sized + ToStatement, + { self.client.query(statement, params) } /// Like `Client::query_iter`. - pub fn query_iter<'b, I>( + pub fn query_iter<'b, T, I>( &'b self, - statement: &'b Statement, + statement: &'b T, params: I, ) -> impl Stream> + 'b where + T: ?Sized + ToStatement, I: IntoIterator + 'b, I::IntoIter: ExactSizeIterator, { @@ -114,21 +119,25 @@ impl<'a> Transaction<'a> { } /// Like `Client::execute`. - pub async fn execute( + pub async fn execute( &self, - statement: &Statement, + statement: &T, params: &[&(dyn ToSql + Sync)], - ) -> Result { + ) -> Result + where + T: ?Sized + ToStatement, + { self.client.execute(statement, params).await } /// Like `Client::execute_iter`. - pub async fn execute_iter<'b, I>( + pub async fn execute_iter<'b, I, T>( &self, statement: &Statement, params: I, ) -> Result where + T: ?Sized + ToStatement, I: IntoIterator, I::IntoIter: ExactSizeIterator, { @@ -143,102 +152,100 @@ impl<'a> Transaction<'a> { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn bind( + pub async fn bind( &self, - statement: &Statement, + statement: &T, params: &[&(dyn ToSql + Sync)], - ) -> impl Future> { - // https://github.com/rust-lang/rust/issues/63032 - let buf = bind::encode(statement, params.iter().map(|s| *s as _)); - bind::bind(self.client.inner(), statement.clone(), buf) + ) -> Result + where + T: ?Sized + ToStatement, + { + self.bind_iter(statement, slice_iter(params)).await } /// Like [`bind`], but takes an iterator of parameters rather than a slice. 
/// /// [`bind`]: #method.bind - pub fn bind_iter<'b, I>( - &self, - statement: &Statement, - params: I, - ) -> impl Future> + pub async fn bind_iter<'b, T, I>(&self, statement: &T, params: I) -> Result where + T: ?Sized + ToStatement, I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = bind::encode(statement, params); - bind::bind(self.client.inner(), statement.clone(), buf) + let statement = statement.__convert().into_statement(&self.client).await?; + bind::bind(self.client.inner(), statement, params).await } /// Continues execution of a portal, returning a stream of the resulting rows. /// /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// `query_portal`. If the requested number is negative or 0, all rows will be returned. - pub fn query_portal( - &self, - portal: &Portal, + pub fn query_portal<'b>( + &'b self, + portal: &'b Portal, max_rows: i32, - ) -> impl Stream> { - query::query_portal(self.client.inner(), portal.clone(), max_rows) + ) -> impl Stream> + 'b { + query::query_portal(self.client.inner(), portal, max_rows) } /// Like `Client::copy_in`. - pub fn copy_in( + pub async fn copy_in( &self, - statement: &Statement, + statement: &T, params: &[&(dyn ToSql + Sync)], stream: S, - ) -> impl Future> + ) -> Result where + T: ?Sized + ToStatement, S: TryStream, S::Ok: IntoBuf, ::Buf: 'static + Send, S::Error: Into>, { - self.client.copy_in(statement, params, stream) + self.client.copy_in(statement, params, stream).await } /// Like `Client::copy_out`. - pub fn copy_out( - &self, - statement: &Statement, - params: &[&(dyn ToSql + Sync)], - ) -> impl Stream> { + pub fn copy_out<'b, T>( + &'b self, + statement: &'b T, + params: &'b [&(dyn ToSql + Sync)], + ) -> impl Stream> + 'b + where + T: ?Sized + ToStatement, + { self.client.copy_out(statement, params) } /// Like `Client::simple_query`. - pub fn simple_query( - &self, - query: &str, - ) -> impl Stream> { + pub fn simple_query<'b>( + &'b self, + query: &'b str, + ) -> impl Stream> + 'b { self.client.simple_query(query) } /// Like `Client::batch_execute`. - pub fn batch_execute(&self, query: &str) -> impl Future> { - self.client.batch_execute(query) + pub async fn batch_execute(&self, query: &str) -> Result<(), Error> { + self.client.batch_execute(query).await } /// Like `Client::cancel_query`. #[cfg(feature = "runtime")] - pub fn cancel_query(&self, tls: T) -> impl Future> + pub async fn cancel_query(&self, tls: T) -> Result<(), Error> where T: MakeTlsConnect, { - self.client.cancel_query(tls) + self.client.cancel_query(tls).await } /// Like `Client::cancel_query_raw`. - pub fn cancel_query_raw( - &self, - stream: S, - tls: T, - ) -> impl Future> + pub async fn cancel_query_raw(&self, stream: S, tls: T) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - self.client.cancel_query_raw(stream, tls) + self.client.cancel_query_raw(stream, tls).await } /// Like `Client::transaction`. 
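The now-async `bind` and the borrowing `query_portal` above combine as in the `query_portal` test later in this patch; a rough sketch, assuming an async context, a connected `client`, and a hypothetical `foo (id INT)` table (the `page_through` helper is illustrative only):

```rust
use futures::TryStreamExt;
use tokio_postgres::{Client, Error};

async fn page_through(client: &mut Client) -> Result<(), Error> {
    let stmt = client.prepare("SELECT id FROM foo ORDER BY id").await?;
    let transaction = client.transaction().await?;

    // bind is now an async fn that resolves directly to a Portal.
    let portal = transaction.bind(&stmt, &[]).await?;

    loop {
        // Each call resumes the portal, yielding at most two more rows.
        let page: Vec<_> = transaction.query_portal(&portal, 2).try_collect().await?;
        if page.is_empty() {
            break;
        }
        for row in &page {
            let _id: i32 = row.get(0);
        }
    }

    transaction.commit().await
}
```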
diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 802e9149b..f3b37e968 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -98,7 +98,7 @@ async fn scram_password_ok() { #[tokio::test] async fn pipelined_prepare() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let prepare1 = client.prepare("SELECT $1::HSTORE[]"); let prepare2 = client.prepare("SELECT $1::BIGINT"); @@ -114,7 +114,7 @@ async fn pipelined_prepare() { #[tokio::test] async fn insert_select() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute("CREATE TEMPORARY TABLE foo (id SERIAL, name TEXT)") @@ -138,7 +138,7 @@ async fn insert_select() { #[tokio::test] async fn custom_enum() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -167,7 +167,7 @@ async fn custom_enum() { #[tokio::test] async fn custom_domain() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute("CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16)") @@ -183,7 +183,7 @@ async fn custom_domain() { #[tokio::test] async fn custom_array() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let select = client.prepare("SELECT $1::HSTORE[]").await.unwrap(); @@ -200,7 +200,7 @@ async fn custom_array() { #[tokio::test] async fn custom_composite() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -232,7 +232,7 @@ async fn custom_composite() { #[tokio::test] async fn custom_range() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -253,7 +253,7 @@ async fn custom_range() { #[tokio::test] async fn simple_query() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let messages = client .simple_query( @@ -299,7 +299,7 @@ async fn simple_query() { #[tokio::test] async fn cancel_query_raw() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let cancel = client.cancel_query_raw(socket, NoTls); @@ -327,7 +327,7 @@ async fn transaction_commit() { .await .unwrap(); - let mut transaction = client.transaction().await.unwrap(); + let transaction = client.transaction().await.unwrap(); transaction .batch_execute("INSERT INTO foo (name) VALUES ('steven')") .await @@ -359,7 +359,7 @@ async fn transaction_rollback() { .await .unwrap(); - let mut transaction = client.transaction().await.unwrap(); + let transaction = client.transaction().await.unwrap(); transaction .batch_execute("INSERT INTO foo (name) VALUES ('steven')") .await @@ -390,7 +390,7 @@ async fn transaction_rollback_drop() { .await .unwrap(); - let mut transaction = client.transaction().await.unwrap(); + let transaction = client.transaction().await.unwrap(); transaction .batch_execute("INSERT INTO foo (name) VALUES ('steven')") .await @@ -409,7 +409,7 @@ async fn transaction_rollback_drop() { #[tokio::test] async fn copy_in() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -449,7 +449,7 @@ async fn copy_in() { #[tokio::test] async fn 
copy_in_large() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -480,7 +480,7 @@ async fn copy_in_large() { #[tokio::test] async fn copy_in_error() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -511,7 +511,7 @@ async fn copy_in_error() { #[tokio::test] async fn copy_out() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -532,7 +532,7 @@ async fn copy_out() { #[tokio::test] async fn notifications() { - let (mut client, mut connection) = connect_raw("user=postgres").await.unwrap(); + let (client, mut connection) = connect_raw("user=postgres").await.unwrap(); let (tx, rx) = mpsc::unbounded(); let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!(e)); @@ -585,7 +585,7 @@ async fn query_portal() { .await .unwrap(); - let mut transaction = client.transaction().await.unwrap(); + let transaction = client.transaction().await.unwrap(); let portal = transaction.bind(&stmt, &[]).await.unwrap(); let f1 = transaction.query_portal(&portal, 2).try_collect::>(); @@ -624,3 +624,36 @@ async fn prefer_channel_binding() { async fn disable_channel_binding() { connect("user=postgres channel_binding=disable").await; } + +#[tokio::test] +async fn check_send() { + fn is_send(_: &T) {} + + let f = connect("user=postgres"); + is_send(&f); + let mut client = f.await; + + let f = client.prepare("SELECT $1::TEXT"); + is_send(&f); + let stmt = f.await.unwrap(); + + let f = client.query(&stmt, &[&"hello"]); + is_send(&f); + drop(f); + + let f = client.execute(&stmt, &[&"hello"]); + is_send(&f); + drop(f); + + let f = client.transaction(); + is_send(&f); + let trans = f.await.unwrap(); + + let f = trans.query(&stmt, &[&"hello"]); + is_send(&f); + drop(f); + + let f = trans.execute(&stmt, &[&"hello"]); + is_send(&f); + drop(f); +} diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 2d3f233e0..07f0ed4fb 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -13,7 +13,7 @@ async fn connect(s: &str) -> Client { } async fn smoke_test(s: &str) { - let mut client = connect(s).await; + let client = connect(s).await; let stmt = client.prepare("SELECT $1::INT").await.unwrap(); let rows = client @@ -72,7 +72,7 @@ async fn target_session_attrs_err() { #[tokio::test] async fn cancel_query() { - let mut client = connect("host=localhost port=5433 user=postgres").await; + let client = connect("host=localhost port=5433 user=postgres").await; let cancel = client.cancel_query(NoTls); let cancel = timer::delay(Instant::now() + Duration::from_millis(100)).then(|()| cancel); diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 6f5f054c9..6f7dd5eb7 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -1,4 +1,5 @@ use futures::TryStreamExt; +use postgres_types::to_sql_checked; use std::collections::HashMap; use std::error::Error; use std::f32; @@ -7,7 +8,6 @@ use std::fmt; use std::net::IpAddr; use std::result; use std::time::{Duration, UNIX_EPOCH}; -use postgres_types::to_sql_checked; use tokio_postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, ToSql, Type, WrongType}; use crate::connect; @@ -30,27 +30,19 @@ where T: PartialEq + for<'a> FromSqlOwned + ToSql + Sync, S: fmt::Display, { - let mut client = 
connect("user=postgres").await; + let client = connect("user=postgres").await; for (val, repr) in checks { - let stmt = client - .prepare(&format!("SELECT {}::{}", repr, sql_type)) - .await - .unwrap(); let rows = client - .query(&stmt, &[]) + .query(&*format!("SELECT {}::{}", repr, sql_type), &[]) .try_collect::>() .await .unwrap(); let result = rows[0].get(0); assert_eq!(val, &result); - let stmt = client - .prepare(&format!("SELECT $1::{}", sql_type)) - .await - .unwrap(); let rows = client - .query(&stmt, &[&val]) + .query(&*format!("SELECT $1::{}", sql_type), &[&val]) .try_collect::>() .await .unwrap(); @@ -203,7 +195,7 @@ async fn test_text_params() { #[tokio::test] async fn test_borrowed_text() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let stmt = client.prepare("SELECT 'foo'").await.unwrap(); let rows = client @@ -217,7 +209,7 @@ async fn test_borrowed_text() { #[tokio::test] async fn test_bpchar_params() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -257,7 +249,7 @@ async fn test_bpchar_params() { #[tokio::test] async fn test_citext_params() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -306,7 +298,7 @@ async fn test_bytea_params() { #[tokio::test] async fn test_borrowed_bytea() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let stmt = client.prepare("SELECT 'foo'::BYTEA").await.unwrap(); let rows = client .query(&stmt, &[]) @@ -365,7 +357,7 @@ async fn test_nan_param(sql_type: &str) where T: PartialEq + ToSql + FromSqlOwned, { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let stmt = client .prepare(&format!("SELECT 'NaN'::{}", sql_type)) @@ -392,7 +384,7 @@ async fn test_f64_nan_param() { #[tokio::test] async fn test_pg_database_datname() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let stmt = client .prepare("SELECT datname FROM pg_database") .await @@ -407,7 +399,7 @@ async fn test_pg_database_datname() { #[tokio::test] async fn test_slice() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -436,7 +428,7 @@ async fn test_slice() { #[tokio::test] async fn test_slice_wrong_type() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -465,7 +457,7 @@ async fn test_slice_wrong_type() { #[tokio::test] async fn test_slice_range() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; let stmt = client.prepare("SELECT $1::INT8RANGE").await.unwrap(); let err = client @@ -520,7 +512,7 @@ async fn domain() { } } - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -551,7 +543,7 @@ async fn domain() { #[tokio::test] async fn composite() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute( @@ -582,7 +574,7 @@ async fn composite() { #[tokio::test] async fn enum_() { - let mut client = connect("user=postgres").await; + let client = connect("user=postgres").await; client .batch_execute("CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy')") @@ -656,36 +648,3 @@ async fn inet() { ) .await; } - 
-#[tokio::test] -async fn check_send() { - fn is_send(_: &T) {} - - let f = connect("user=postgres"); - is_send(&f); - let mut client = f.await; - - let f = client.prepare("SELECT $1::TEXT"); - is_send(&f); - let stmt = f.await.unwrap(); - - let f = client.query(&stmt, &[&"hello"]); - is_send(&f); - drop(f); - - let f = client.execute(&stmt, &[&"hello"]); - is_send(&f); - drop(f); - - let f = client.transaction(); - is_send(&f); - let mut trans = f.await.unwrap(); - - let f = trans.query(&stmt, &[&"hello"]); - is_send(&f); - drop(f); - - let f = trans.execute(&stmt, &[&"hello"]); - is_send(&f); - drop(f); -} From 1473c09b83a9ce583042e85aee524b2f80799ecf Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 8 Oct 2019 16:45:48 -0700 Subject: [PATCH 260/819] Fix warnings --- postgres-native-tls/src/test.rs | 4 ++-- postgres-openssl/src/test.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 416a3c14d..45fc08a0f 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -15,7 +15,7 @@ where let stream = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let builder = s.parse::().unwrap(); - let (mut client, connection) = builder.connect_raw(stream, tls).await.unwrap(); + let (client, connection) = builder.connect_raw(stream, tls).await.unwrap(); let connection = connection.map(|r| r.unwrap()); tokio::spawn(connection); @@ -87,7 +87,7 @@ async fn runtime() { .unwrap(); let connector = MakeTlsConnector::new(connector); - let (mut client, connection) = tokio_postgres::connect( + let (client, connection) = tokio_postgres::connect( "host=localhost port=5433 user=postgres sslmode=require", connector, ) diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index e3ee454ea..9f29bab1b 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -13,7 +13,7 @@ where let stream = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let builder = s.parse::().unwrap(); - let (mut client, connection) = builder.connect_raw(stream, tls).await.unwrap(); + let (client, connection) = builder.connect_raw(stream, tls).await.unwrap(); let connection = connection.map(|r| r.unwrap()); tokio::spawn(connection); @@ -98,7 +98,7 @@ async fn runtime() { builder.set_ca_file("../test/server.crt").unwrap(); let connector = MakeTlsConnector::new(builder.build()); - let (mut client, connection) = tokio_postgres::connect( + let (client, connection) = tokio_postgres::connect( "host=localhost port=5433 user=postgres sslmode=require", connector, ) From 251710013247c3686099380a99edb94b4559560e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 8 Oct 2019 17:15:41 -0700 Subject: [PATCH 261/819] Overhaul query This is the template that we'll use for all other methods taking parameters. The `foo_raw` variant is the most flexible (but annoying to use), while `foo` covers the expected common case. 
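Concretely, with the signatures introduced below, the two forms read roughly as follows (a sketch, assuming an async context, a connected `client`, and a hypothetical `foo` table; the stream is pinned with `pin_utils::pin_mut!`, the same way this patch's `prepare.rs` consumes a `RowStream`):

```rust
use futures::TryStreamExt;
use pin_utils::pin_mut;
use tokio_postgres::types::ToSql;
use tokio_postgres::{Client, Error};

async fn demo(client: &Client) -> Result<(), Error> {
    // Common case: `query` buffers every result row into a Vec<Row>.
    let rows = client
        .query("SELECT name FROM foo WHERE id = $1", &[&1i32])
        .await?;
    if let Some(row) = rows.first() {
        println!("{}", row.get::<_, &str>(0));
    }

    // Flexible case: `query_raw` takes an iterator of parameters and returns
    // a RowStream that can be consumed incrementally.
    let params: Vec<&dyn ToSql> = vec![&1i32];
    let stream = client
        .query_raw("SELECT name FROM foo WHERE id = $1", params.into_iter())
        .await?;
    pin_mut!(stream);
    while let Some(row) = stream.try_next().await? {
        println!("{}", row.get::<_, &str>(0));
    }

    Ok(())
}
```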
--- postgres-native-tls/src/test.rs | 4 +- postgres-openssl/src/test.rs | 4 +- postgres/src/client.rs | 26 ++++++++----- postgres/src/transaction.rs | 19 ++++++---- tokio-postgres/src/client.rs | 51 ++++++++++++++------------ tokio-postgres/src/lib.rs | 3 +- tokio-postgres/src/prepare.rs | 9 +++-- tokio-postgres/src/query.rs | 29 +++++++-------- tokio-postgres/src/transaction.rs | 29 +++++++-------- tokio-postgres/tests/test/main.rs | 7 +--- tokio-postgres/tests/test/runtime.rs | 3 +- tokio-postgres/tests/test/types/mod.rs | 31 ++++++---------- 12 files changed, 103 insertions(+), 112 deletions(-) diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 45fc08a0f..6eb27b23e 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -1,4 +1,4 @@ -use futures::{FutureExt, TryStreamExt}; +use futures::{FutureExt}; use native_tls::{self, Certificate}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; @@ -23,7 +23,6 @@ where let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); let rows = client .query(&stmt, &[&1i32]) - .try_collect::>() .await .unwrap(); @@ -99,7 +98,6 @@ async fn runtime() { let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); let rows = client .query(&stmt, &[&1i32]) - .try_collect::>() .await .unwrap(); diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index 9f29bab1b..eb3e5e295 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -1,4 +1,4 @@ -use futures::{FutureExt, TryStreamExt}; +use futures::{FutureExt}; use openssl::ssl::{SslConnector, SslMethod}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; @@ -21,7 +21,6 @@ where let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); let rows = client .query(&stmt, &[&1i32]) - .try_collect::>() .await .unwrap(); @@ -110,7 +109,6 @@ async fn runtime() { let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); let rows = client .query(&stmt, &[&1i32]) - .try_collect::>() .await .unwrap(); diff --git a/postgres/src/client.rs b/postgres/src/client.rs index a4157b3ec..f99afa1c3 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -122,11 +122,13 @@ impl Client { where T: ?Sized + ToStatement, { - self.query_iter(query, params)?.collect() + executor::block_on(self.0.query(query, params)) } - /// Like `query`, except that it returns a fallible iterator over the resulting rows rather than buffering the - /// response in memory. + /// A maximally-flexible version of `query`. + /// + /// It takes an iterator of parameters rather than a slice, and returns an iterator of rows rather than collecting + /// them into an array. /// /// # Panics /// @@ -137,12 +139,13 @@ impl Client { /// ```no_run /// use postgres::{Client, NoTls}; /// use fallible_iterator::FallibleIterator; + /// use std::iter; /// /// # fn main() -> Result<(), postgres::Error> { /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; /// /// let baz = true; - /// let mut it = client.query_iter("SELECT foo FROM bar WHERE baz = $1", &[&baz])?; + /// let mut it = client.query_raw("SELECT foo FROM bar WHERE baz = $1", iter::once(&baz as _))?; /// /// while let Some(row) = it.next()? 
{ /// let foo: i32 = row.get("foo"); @@ -151,15 +154,18 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn query_iter<'a, T>( - &'a mut self, - query: &'a T, - params: &'a [&(dyn ToSql + Sync)], - ) -> Result + 'a, Error> + pub fn query_raw<'a, T, I>( + &mut self, + query: &T, + params: I, + ) -> Result, Error> where T: ?Sized + ToStatement, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, { - Ok(Iter::new(self.0.query(query, params))) + let stream = executor::block_on(self.0.query_raw(query, params))?; + Ok(Iter::new(stream)) } /// Creates a new prepared statement. diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index ac30369e5..d9dbbc9ca 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -55,19 +55,22 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.query_iter(query, params)?.collect() + executor::block_on(self.0.query(query, params)) } - /// Like `Client::query_iter`. - pub fn query_iter<'b, T>( - &'b mut self, - query: &'b T, - params: &'b [&(dyn ToSql + Sync)], - ) -> Result + 'b, Error> + /// Like `Client::query_raw`. + pub fn query_raw<'b, T, I>( + &mut self, + query: &T, + params: I, + ) -> Result, Error> where T: ?Sized + ToStatement, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, { - Ok(Iter::new(self.0.query(query, params))) + let stream = executor::block_on(self.0.query_raw(query, params))?; + Ok(Iter::new(stream)) } /// Binds parameters to a statement, creating a "portal". diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 29d4a331f..cb7d6f263 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -3,6 +3,7 @@ use crate::cancel_query; use crate::codec::BackendMessages; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; +use crate::query::RowStream; use crate::slice_iter; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; @@ -18,7 +19,7 @@ use crate::{Error, Statement}; use bytes::{Bytes, IntoBuf}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::{future, Stream, TryFutureExt, TryStream}; +use futures::{future, Stream, TryFutureExt, TryStream, TryStreamExt}; use futures::{ready, StreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; @@ -190,40 +191,40 @@ impl Client { prepare::prepare(&self.inner, query, parameter_types).await } - /// Executes a statement, returning a stream of the resulting rows. + /// Executes a statement, returning a vector of the resulting rows. /// /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn query<'a, T>( - &'a self, - statement: &'a T, - params: &'a [&(dyn ToSql + Sync)], - ) -> impl Stream> + 'a + pub async fn query( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> where T: ?Sized + ToStatement, { - self.query_iter(statement, slice_iter(params)) + self.query_raw(statement, slice_iter(params)) + .await? + .try_collect() + .await } - /// Like [`query`], but takes an iterator of parameters rather than a slice. + /// The maximally flexible version of [`query`]. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. 
/// /// [`query`]: #method.query - pub fn query_iter<'a, T, I>( - &'a self, - statement: &'a T, - params: I, - ) -> impl Stream> + 'a + pub async fn query_raw<'a, T, I>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator + 'a, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let f = async move { - let statement = statement.__convert().into_statement(self).await?; - Ok(query::query(&self.inner, statement, params)) - }; - f.try_flatten_stream() + let statement = statement.__convert().into_statement(self).await?; + query::query(&self.inner, statement, params).await } /// Executes a statement, returning the number of rows modified. @@ -241,13 +242,17 @@ impl Client { where T: ?Sized + ToStatement, { - self.execute_iter(statement, slice_iter(params)).await + self.execute_raw(statement, slice_iter(params)).await } - /// Like [`execute`], but takes an iterator of parameters rather than a slice. + /// The maximally flexible version of [`execute`]. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. /// /// [`execute`]: #method.execute - pub async fn execute_iter<'a, T, I>(&self, statement: &T, params: I) -> Result + pub async fn execute_raw<'a, T, I>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, I: IntoIterator, diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 0a3fa6aef..390369279 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -3,7 +3,7 @@ //! # Example //! //! ```no_run -//! use futures::{FutureExt, TryStreamExt}; +//! use futures::FutureExt; //! use tokio_postgres::{NoTls, Error, Row}; //! //! # #[cfg(not(feature = "runtime"))] fn main() {} @@ -29,7 +29,6 @@ //! // And then execute it, returning a Stream of Rows which we collect into a Vec. //! let rows: Vec = client //! .query(&stmt, &[&"hello world"]) -//! .try_collect() //! .await?; //! //! // Now we can check that we got back the same string we sent over. diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index f3c187124..8f27156d4 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -6,7 +6,7 @@ use crate::types::{Field, Kind, Oid, Type}; use crate::{query, slice_iter}; use crate::{Column, Error, Statement}; use fallible_iterator::FallibleIterator; -use futures::{future, TryStreamExt}; +use futures::TryStreamExt; use pin_utils::pin_mut; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -132,8 +132,7 @@ async fn get_type(client: &Arc, oid: Oid) -> Result { let stmt = typeinfo_statement(client).await?; - let params = &[&oid as _]; - let rows = query::query(client, stmt, slice_iter(params)); + let rows = query::query(client, stmt, slice_iter(&[&oid])).await?; pin_mut!(rows); let row = match rows.try_next().await? 
{ @@ -204,7 +203,8 @@ async fn get_enum_variants(client: &Arc, oid: Oid) -> Result, oid: Oid) -> Result>() .await?; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index ee04866b6..5260da264 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -9,24 +9,21 @@ use postgres_protocol::message::frontend; use std::pin::Pin; use std::task::{Context, Poll}; -pub fn query<'a, I>( - client: &'a InnerClient, +pub async fn query<'a, I>( + client: &InnerClient, statement: Statement, params: I, -) -> impl Stream> + 'a +) -> Result where - I: IntoIterator + 'a, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let f = async move { - let buf = encode(&statement, params)?; - let responses = start(client, buf).await?; - Ok(Query { - statement, - responses, - }) - }; - f.try_flatten_stream() + let buf = encode(&statement, params)?; + let responses = start(client, buf).await?; + Ok(RowStream { + statement, + responses, + }) } pub fn query_portal<'a>( @@ -41,7 +38,7 @@ pub fn query_portal<'a>( let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - Ok(Query { + Ok(RowStream { statement: portal.statement().clone(), responses, }) @@ -145,12 +142,12 @@ where } } -struct Query { +pub struct RowStream { statement: Statement, responses: Responses, } -impl Stream for Query { +impl Stream for RowStream { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 5f9dc8fd8..8407c79c2 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -1,5 +1,6 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; +use crate::query::RowStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; use crate::tls::TlsConnect; @@ -93,29 +94,25 @@ impl<'a> Transaction<'a> { } /// Like `Client::query`. - pub fn query<'b, T>( - &'b self, - statement: &'b T, - params: &'b [&(dyn ToSql + Sync)], - ) -> impl Stream> + 'b + pub async fn query( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> where T: ?Sized + ToStatement, { - self.client.query(statement, params) + self.client.query(statement, params).await } - /// Like `Client::query_iter`. - pub fn query_iter<'b, T, I>( - &'b self, - statement: &'b T, - params: I, - ) -> impl Stream> + 'b + /// Like `Client::query_raw`. + pub async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator + 'b, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { - self.client.query_iter(statement, params) + self.client.query_raw(statement, params).await } /// Like `Client::execute`. @@ -131,7 +128,7 @@ impl<'a> Transaction<'a> { } /// Like `Client::execute_iter`. - pub async fn execute_iter<'b, I, T>( + pub async fn execute_raw<'b, I, T>( &self, statement: &Statement, params: I, @@ -141,7 +138,7 @@ impl<'a> Transaction<'a> { I: IntoIterator, I::IntoIter: ExactSizeIterator, { - self.client.execute_iter(statement, params).await + self.client.execute_raw(statement, params).await } /// Binds a statement to a set of parameters, creating a `Portal` which can be incrementally queried. 
diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index f3b37e968..547195bb6 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -126,7 +126,7 @@ async fn insert_select() { let (insert, select) = try_join!(insert, select).unwrap(); let insert = client.execute(&insert, &[&"alice", &"bob"]); - let select = client.query(&select, &[]).try_collect::>(); + let select = client.query(&select, &[]); let (_, rows) = try_join!(insert, select).unwrap(); assert_eq!(rows.len(), 2); @@ -337,7 +337,6 @@ async fn transaction_commit() { let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); @@ -369,7 +368,6 @@ async fn transaction_rollback() { let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); @@ -400,7 +398,6 @@ async fn transaction_rollback_drop() { let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); @@ -436,7 +433,6 @@ async fn copy_in() { .unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); @@ -503,7 +499,6 @@ async fn copy_in_error() { .unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); assert_eq!(rows.len(), 0); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 07f0ed4fb..50b3ab6f6 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,4 +1,4 @@ -use futures::{join, FutureExt, TryStreamExt}; +use futures::{join, FutureExt}; use std::time::{Duration, Instant}; use tokio::timer; use tokio_postgres::error::SqlState; @@ -18,7 +18,6 @@ async fn smoke_test(s: &str) { let stmt = client.prepare("SELECT $1::INT").await.unwrap(); let rows = client .query(&stmt, &[&1i32]) - .try_collect::>() .await .unwrap(); assert_eq!(rows[0].get::<_, i32>(0), 1i32); diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 6f7dd5eb7..40d3017cc 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -1,4 +1,3 @@ -use futures::TryStreamExt; use postgres_types::to_sql_checked; use std::collections::HashMap; use std::error::Error; @@ -35,7 +34,6 @@ where for (val, repr) in checks { let rows = client .query(&*format!("SELECT {}::{}", repr, sql_type), &[]) - .try_collect::>() .await .unwrap(); let result = rows[0].get(0); @@ -43,7 +41,6 @@ where let rows = client .query(&*format!("SELECT $1::{}", sql_type), &[&val]) - .try_collect::>() .await .unwrap(); let result = rows[0].get(0); @@ -200,7 +197,6 @@ async fn test_borrowed_text() { let stmt = client.prepare("SELECT 'foo'").await.unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); let s: &str = rows[0].get(0); @@ -236,10 +232,11 @@ async fn test_bpchar_params() { .unwrap(); let rows = client .query(&stmt, &[]) - .map_ok(|row| row.get(0)) - .try_collect::>>() .await - .unwrap(); + .unwrap() + .into_iter() + .map(|row| row.get(0)) + .collect::>>(); assert_eq!( vec![Some("12345".to_owned()), Some("123 ".to_owned()), None], @@ -276,10 +273,11 @@ async fn test_citext_params() { .unwrap(); let rows = client .query(&stmt, &[]) - .map_ok(|row| row.get(0)) - .try_collect::>() .await - .unwrap(); + .unwrap() + .into_iter() + .map(|row| row.get(0)) + .collect::>(); 
assert_eq!(vec!["foobar".to_string(), "FooBar".to_string()], rows,); } @@ -302,7 +300,6 @@ async fn test_borrowed_bytea() { let stmt = client.prepare("SELECT 'foo'::BYTEA").await.unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); let s: &[u8] = rows[0].get(0); @@ -365,7 +362,6 @@ where .unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); let val: T = rows[0].get(0); @@ -391,7 +387,6 @@ async fn test_pg_database_datname() { .unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); assert_eq!(rows[0].get::<_, &str>(0), "postgres"); @@ -418,10 +413,11 @@ async fn test_slice() { .unwrap(); let rows = client .query(&stmt, &[&&[1i32, 3, 4][..]]) - .map_ok(|r| r.get(0)) - .try_collect::>() .await - .unwrap(); + .unwrap() + .into_iter() + .map(|r| r.get(0)) + .collect::>(); assert_eq!(vec!["a".to_owned(), "c".to_owned(), "d".to_owned()], rows); } @@ -445,7 +441,6 @@ async fn test_slice_wrong_type() { .unwrap(); let err = client .query(&stmt, &[&&[&"hi"][..]]) - .try_collect::>() .await .err() .unwrap(); @@ -462,7 +457,6 @@ async fn test_slice_range() { let stmt = client.prepare("SELECT $1::INT8RANGE").await.unwrap(); let err = client .query(&stmt, &[&&[&1i64][..]]) - .try_collect::>() .await .err() .unwrap(); @@ -535,7 +529,6 @@ async fn domain() { let stmt = client.prepare("SELECT id FROM pg_temp.foo").await.unwrap(); let rows = client .query(&stmt, &[]) - .try_collect::>() .await .unwrap(); assert_eq!(id, rows[0].get(0)); From b8577b45b1dae4be16a47c89624f044b3b91f270 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 8 Oct 2019 17:22:56 -0700 Subject: [PATCH 262/819] Overhaul query_portal --- postgres-native-tls/src/test.rs | 12 +++------ postgres-openssl/src/test.rs | 12 +++------ postgres/src/transaction.rs | 16 +++++------ tokio-postgres/src/query.rs | 30 +++++++++------------ tokio-postgres/src/transaction.rs | 20 +++++++++----- tokio-postgres/tests/test/main.rs | 31 ++++++--------------- tokio-postgres/tests/test/runtime.rs | 5 +--- tokio-postgres/tests/test/types/mod.rs | 37 +++++--------------------- 8 files changed, 57 insertions(+), 106 deletions(-) diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 6eb27b23e..7a50bc672 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -1,4 +1,4 @@ -use futures::{FutureExt}; +use futures::FutureExt; use native_tls::{self, Certificate}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; @@ -21,10 +21,7 @@ where tokio::spawn(connection); let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); - let rows = client - .query(&stmt, &[&1i32]) - .await - .unwrap(); + let rows = client.query(&stmt, &[&1i32]).await.unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -96,10 +93,7 @@ async fn runtime() { tokio::spawn(connection); let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); - let rows = client - .query(&stmt, &[&1i32]) - .await - .unwrap(); + let rows = client.query(&stmt, &[&1i32]).await.unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index eb3e5e295..15ed90ad5 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -1,4 +1,4 @@ -use futures::{FutureExt}; +use futures::FutureExt; use openssl::ssl::{SslConnector, SslMethod}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; @@ -19,10 +19,7 @@ 
where tokio::spawn(connection); let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); - let rows = client - .query(&stmt, &[&1i32]) - .await - .unwrap(); + let rows = client.query(&stmt, &[&1i32]).await.unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -107,10 +104,7 @@ async fn runtime() { tokio::spawn(connection); let stmt = client.prepare("SELECT $1::INT4").await.unwrap(); - let rows = client - .query(&stmt, &[&1i32]) - .await - .unwrap(); + let rows = client.query(&stmt, &[&1i32]).await.unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 1); diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index d9dbbc9ca..895c69396 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -95,17 +95,17 @@ impl<'a> Transaction<'a> { /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// `query_portal`. If the requested number is negative or 0, all remaining rows will be returned. pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { - self.query_portal_iter(portal, max_rows)?.collect() + executor::block_on(self.0.query_portal(portal, max_rows)) } - /// Like `query_portal`, except that it returns a fallible iterator over the resulting rows rather than buffering - /// the entire response in memory. - pub fn query_portal_iter<'b>( - &'b mut self, - portal: &'b Portal, + /// The maximally flexible version of `query_portal`. + pub fn query_portal_raw( + &mut self, + portal: &Portal, max_rows: i32, - ) -> Result + 'b, Error> { - Ok(Iter::new(self.0.query_portal(&portal, max_rows))) + ) -> Result, Error> { + let stream = executor::block_on(self.0.query_portal_raw(portal, max_rows))?; + Ok(Iter::new(stream)) } /// Like `Client::copy_in`. 
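For the blocking crate, the portal API after this change might be used as in the following sketch (editorial example, not part of the patch; it assumes the blocking `Transaction` exposes `prepare` and `bind` like its async counterpart, and a connected `postgres::Client` named `client`):

    use postgres::{Client, Error};

    fn read_in_pages(client: &mut Client) -> Result<(), Error> {
        let mut transaction = client.transaction()?;
        let stmt = transaction.prepare("SELECT id FROM foo ORDER BY id")?;
        let portal = transaction.bind(&stmt, &[])?;
        loop {
            // `query_portal` now blocks on the async call and returns a buffered
            // Vec of rows; at most 100 rows are pulled per round trip, and 0 or
            // a negative max_rows would drain the portal completely.
            let rows = transaction.query_portal(&portal, 100)?;
            if rows.is_empty() {
                break;
            }
            for row in &rows {
                let id: i32 = row.get(0);
                println!("{}", id);
            }
        }
        transaction.commit()?;
        Ok(())
    }

Callers that prefer to stream rows without buffering each page can reach for `query_portal_raw`, which hands back a fallible iterator instead.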
diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 5260da264..57af33f6c 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -3,7 +3,7 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::{IsNull, ToSql}; use crate::{Error, Portal, Row, Statement}; -use futures::{ready, Stream, TryFutureExt}; +use futures::{ready, Stream}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::pin::Pin; @@ -26,25 +26,21 @@ where }) } -pub fn query_portal<'a>( - client: &'a InnerClient, - portal: &'a Portal, +pub async fn query_portal( + client: &InnerClient, + portal: &Portal, max_rows: i32, -) -> impl Stream> + 'a { - let start = async move { - let mut buf = vec![]; - frontend::execute(portal.name(), max_rows, &mut buf).map_err(Error::encode)?; - frontend::sync(&mut buf); - - let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; +) -> Result { + let mut buf = vec![]; + frontend::execute(portal.name(), max_rows, &mut buf).map_err(Error::encode)?; + frontend::sync(&mut buf); - Ok(RowStream { - statement: portal.statement().clone(), - responses, - }) - }; + let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - start.try_flatten_stream() + Ok(RowStream { + statement: portal.statement().clone(), + responses, + }) } pub async fn execute<'a, I>( diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 8407c79c2..ee6db4c92 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -11,7 +11,7 @@ use crate::{ bind, query, slice_iter, Client, Error, Portal, Row, SimpleQueryMessage, Statement, ToStatement, }; use bytes::{Bytes, IntoBuf}; -use futures::{Stream, TryStream}; +use futures::{Stream, TryStream, TryStreamExt}; use postgres_protocol::message::frontend; use std::error; use tokio::io::{AsyncRead, AsyncWrite}; @@ -177,12 +177,20 @@ impl<'a> Transaction<'a> { /// /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// `query_portal`. If the requested number is negative or 0, all rows will be returned. - pub fn query_portal<'b>( - &'b self, - portal: &'b Portal, + pub async fn query_portal(&self, portal: &Portal, max_rows: i32) -> Result, Error> { + self.query_portal_raw(portal, max_rows) + .await? + .try_collect() + .await + } + + /// The maximally flexible version of `query_portal`. + pub async fn query_portal_raw( + &self, + portal: &Portal, max_rows: i32, - ) -> impl Stream> + 'b { - query::query_portal(self.client.inner(), portal, max_rows) + ) -> Result { + query::query_portal(self.client.inner(), portal, max_rows).await } /// Like `Client::copy_in`. 
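On the async side, the same pattern might look like the sketch below (editorial example, not part of the patch; it assumes a connected `tokio_postgres::Client` named `client`). `query_portal` now resolves to a buffered `Vec` of rows, while `query_portal_raw` yields the underlying `RowStream`:

    use tokio_postgres::{Client, Error};

    async fn page_through(client: &mut Client) -> Result<(), Error> {
        let transaction = client.transaction().await?;
        let stmt = transaction.prepare("SELECT id FROM foo ORDER BY id").await?;
        let portal = transaction.bind(&stmt, &[]).await?;
        loop {
            // Each call pulls at most 50 rows; 0 or a negative max_rows
            // drains whatever remains in the portal.
            let rows = transaction.query_portal(&portal, 50).await?;
            if rows.is_empty() {
                break;
            }
            for row in rows {
                let id: i32 = row.get(0);
                println!("{}", id);
            }
        }
        transaction.commit().await?;
        Ok(())
    }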
diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 547195bb6..4beb3fe0b 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -335,10 +335,7 @@ async fn transaction_commit() { transaction.commit().await.unwrap(); let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, &str>(0), "steven"); @@ -366,10 +363,7 @@ async fn transaction_rollback() { transaction.rollback().await.unwrap(); let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); assert_eq!(rows.len(), 0); } @@ -396,10 +390,7 @@ async fn transaction_rollback_drop() { drop(transaction); let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); assert_eq!(rows.len(), 0); } @@ -431,10 +422,7 @@ async fn copy_in() { .prepare("SELECT id, name FROM foo ORDER BY id") .await .unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -497,10 +485,7 @@ async fn copy_in_error() { .prepare("SELECT id, name FROM foo ORDER BY id") .await .unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); assert_eq!(rows.len(), 0); } @@ -583,9 +568,9 @@ async fn query_portal() { let transaction = client.transaction().await.unwrap(); let portal = transaction.bind(&stmt, &[]).await.unwrap(); - let f1 = transaction.query_portal(&portal, 2).try_collect::>(); - let f2 = transaction.query_portal(&portal, 2).try_collect::>(); - let f3 = transaction.query_portal(&portal, 2).try_collect::>(); + let f1 = transaction.query_portal(&portal, 2); + let f2 = transaction.query_portal(&portal, 2); + let f3 = transaction.query_portal(&portal, 2); let (r1, r2, r3) = try_join!(f1, f2, f3).unwrap(); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 50b3ab6f6..dbfe91928 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -16,10 +16,7 @@ async fn smoke_test(s: &str) { let client = connect(s).await; let stmt = client.prepare("SELECT $1::INT").await.unwrap(); - let rows = client - .query(&stmt, &[&1i32]) - .await - .unwrap(); + let rows = client.query(&stmt, &[&1i32]).await.unwrap(); assert_eq!(rows[0].get::<_, i32>(0), 1i32); } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 40d3017cc..fefd1ed5b 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -195,10 +195,7 @@ async fn test_borrowed_text() { let client = connect("user=postgres").await; let stmt = client.prepare("SELECT 'foo'").await.unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); let s: &str = rows[0].get(0); assert_eq!(s, "foo"); } @@ -298,10 +295,7 @@ async fn test_bytea_params() { async fn test_borrowed_bytea() { let client = connect("user=postgres").await; let stmt = client.prepare("SELECT 'foo'::BYTEA").await.unwrap(); - let rows = client - .query(&stmt, 
&[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); let s: &[u8] = rows[0].get(0); assert_eq!(s, b"foo"); } @@ -360,10 +354,7 @@ where .prepare(&format!("SELECT 'NaN'::{}", sql_type)) .await .unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); let val: T = rows[0].get(0); assert!(val != val); } @@ -385,10 +376,7 @@ async fn test_pg_database_datname() { .prepare("SELECT datname FROM pg_database") .await .unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); assert_eq!(rows[0].get::<_, &str>(0), "postgres"); } @@ -439,11 +427,7 @@ async fn test_slice_wrong_type() { .prepare("SELECT * FROM foo WHERE id = ANY($1)") .await .unwrap(); - let err = client - .query(&stmt, &[&&[&"hi"][..]]) - .await - .err() - .unwrap(); + let err = client.query(&stmt, &[&&[&"hi"][..]]).await.err().unwrap(); match err.source() { Some(e) if e.is::() => {} _ => panic!("Unexpected error {:?}", err), @@ -455,11 +439,7 @@ async fn test_slice_range() { let client = connect("user=postgres").await; let stmt = client.prepare("SELECT $1::INT8RANGE").await.unwrap(); - let err = client - .query(&stmt, &[&&[&1i64][..]]) - .await - .err() - .unwrap(); + let err = client.query(&stmt, &[&&[&1i64][..]]).await.err().unwrap(); match err.source() { Some(e) if e.is::() => {} _ => panic!("Unexpected error {:?}", err), @@ -527,10 +507,7 @@ async fn domain() { client.execute(&stmt, &[&id]).await.unwrap(); let stmt = client.prepare("SELECT id FROM pg_temp.foo").await.unwrap(); - let rows = client - .query(&stmt, &[]) - .await - .unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); assert_eq!(id, rows[0].get(0)); } From a3f611d60951578a538735e2040555bdd78d93a9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 8 Oct 2019 19:01:34 -0700 Subject: [PATCH 263/819] Overhaul copy_out --- postgres/src/client.rs | 12 ++++++------ postgres/src/transaction.rs | 12 ++++++------ tokio-postgres/src/client.rs | 24 +++++++++++------------- tokio-postgres/src/copy_out.rs | 23 ++++++++++------------- tokio-postgres/src/transaction.rs | 15 ++++++++------- tokio-postgres/tests/test/main.rs | 8 +++++++- 6 files changed, 48 insertions(+), 46 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index f99afa1c3..97eec69f5 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -275,15 +275,15 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn copy_out<'a, T>( - &'a mut self, - query: &'a T, - params: &'a [&(dyn ToSql + Sync)], - ) -> Result + pub fn copy_out( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result where T: ?Sized + ToStatement, { - let stream = self.0.copy_out(query, params); + let stream = executor::block_on(self.0.copy_out(query, params))?; CopyOutReader::new(stream) } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 895c69396..3e509ac95 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -123,15 +123,15 @@ impl<'a> Transaction<'a> { } /// Like `Client::copy_out`. 
- pub fn copy_out<'b, T>( - &'b mut self, - query: &'b T, - params: &'b [&(dyn ToSql + Sync)], - ) -> Result + pub fn copy_out( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result where T: ?Sized + ToStatement, { - let stream = self.0.copy_out(query, params); + let stream = executor::block_on(self.0.copy_out(query, params))?; CopyOutReader::new(stream) } diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index cb7d6f263..c7b91f174 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -3,6 +3,7 @@ use crate::cancel_query; use crate::codec::BackendMessages; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; +use crate::copy_out::CopyStream; use crate::query::RowStream; use crate::slice_iter; #[cfg(feature = "runtime")] @@ -16,10 +17,10 @@ use crate::{cancel_query_raw, copy_in, copy_out, query, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; -use bytes::{Bytes, IntoBuf}; +use bytes::IntoBuf; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::{future, Stream, TryFutureExt, TryStream, TryStreamExt}; +use futures::{future, Stream, TryStream, TryStreamExt}; use futures::{ready, StreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; @@ -293,20 +294,17 @@ impl Client { /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub fn copy_out<'a, T>( - &'a self, - statement: &'a T, - params: &'a [&(dyn ToSql + Sync)], - ) -> impl Stream> + 'a + pub async fn copy_out( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result where T: ?Sized + ToStatement, { - let f = async move { - let statement = statement.__convert().into_statement(self).await?; - let params = slice_iter(params); - Ok(copy_out::copy_out(self.inner(), statement, params)) - }; - f.try_flatten_stream() + let statement = statement.__convert().into_statement(self).await?; + let params = slice_iter(params); + copy_out::copy_out(self.inner(), statement, params).await } /// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows. 
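A short sketch of reading a `COPY ... TO STDOUT` response with the reworked async API (editorial example, not part of the patch; it assumes a connected `tokio_postgres::Client` named `client` and an existing `foo` table):

    use futures::TryStreamExt;
    use tokio_postgres::{Client, Error};

    async fn dump_foo(client: &Client) -> Result<(), Error> {
        let stmt = client.prepare("COPY foo TO STDOUT").await?;
        // `copy_out` now resolves to a CopyStream of byte chunks once the
        // COPY has actually started, so errors in the setup phase surface here.
        let stream = client.copy_out(&stmt, &[]).await?;
        let data = stream.try_concat().await?;
        println!("copied {} bytes", data.len());
        Ok(())
    }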
diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 30c85ee71..41bd87476 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -4,26 +4,23 @@ use crate::connection::RequestMessages; use crate::types::ToSql; use crate::{query, Error, Statement}; use bytes::Bytes; -use futures::{ready, Stream, TryFutureExt}; +use futures::{ready, Stream}; use postgres_protocol::message::backend::Message; use std::pin::Pin; use std::task::{Context, Poll}; -pub fn copy_out<'a, I>( - client: &'a InnerClient, +pub async fn copy_out<'a, I>( + client: &InnerClient, statement: Statement, params: I, -) -> impl Stream> + 'a +) -> Result where - I: IntoIterator + 'a, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let f = async move { - let buf = query::encode(&statement, params)?; - let responses = start(client, buf).await?; - Ok(CopyOut { responses }) - }; - f.try_flatten_stream() + let buf = query::encode(&statement, params)?; + let responses = start(client, buf).await?; + Ok(CopyStream { responses }) } async fn start(client: &InnerClient, buf: Vec) -> Result { @@ -42,11 +39,11 @@ async fn start(client: &InnerClient, buf: Vec) -> Result { Ok(responses) } -struct CopyOut { +pub struct CopyStream { responses: Responses, } -impl Stream for CopyOut { +impl Stream for CopyStream { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index ee6db4c92..49d24d505 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -1,5 +1,6 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; +use crate::copy_out::CopyStream; use crate::query::RowStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; @@ -10,7 +11,7 @@ use crate::Socket; use crate::{ bind, query, slice_iter, Client, Error, Portal, Row, SimpleQueryMessage, Statement, ToStatement, }; -use bytes::{Bytes, IntoBuf}; +use bytes::IntoBuf; use futures::{Stream, TryStream, TryStreamExt}; use postgres_protocol::message::frontend; use std::error; @@ -211,15 +212,15 @@ impl<'a> Transaction<'a> { } /// Like `Client::copy_out`. - pub fn copy_out<'b, T>( - &'b self, - statement: &'b T, - params: &'b [&(dyn ToSql + Sync)], - ) -> impl Stream> + 'b + pub async fn copy_out( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result where T: ?Sized + ToStatement, { - self.client.copy_out(statement, params) + self.client.copy_out(statement, params).await } /// Like `Client::simple_query`. 
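The blocking wrapper mirrors this by driving the future to completion and handing back a reader. A sketch (editorial example, not part of the patch; it assumes `CopyOutReader` implements `std::io::Read`, which is how the blocking crate exposes COPY data, and a connected `postgres::Client` named `client`):

    use std::io::Read;
    use postgres::Client;

    fn dump_foo(client: &mut Client) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
        let stmt = client.prepare("COPY foo TO STDOUT")?;
        // The reader borrows the client for the duration of the COPY and is
        // assumed here to be drained with ordinary blocking I/O.
        let mut reader = client.copy_out(&stmt, &[])?;
        let mut buf = Vec::new();
        reader.read_to_end(&mut buf)?;
        Ok(buf)
    }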
diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 4beb3fe0b..9069f41e1 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -506,7 +506,13 @@ async fn copy_out() { .unwrap(); let stmt = client.prepare("COPY foo TO STDOUT").await.unwrap(); - let data = client.copy_out(&stmt, &[]).try_concat().await.unwrap(); + let data = client + .copy_out(&stmt, &[]) + .await + .unwrap() + .try_concat() + .await + .unwrap(); assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); } From c7055dc66506d4a696c16063ceb8ffe5decff0ee Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 9 Oct 2019 15:20:23 -0700 Subject: [PATCH 264/819] Clippy fixes --- codegen/src/type_gen.rs | 5 ++--- postgres-native-tls/src/lib.rs | 1 + postgres-openssl/src/lib.rs | 1 + tokio-postgres/src/query.rs | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 1e882ab18..1b3379aba 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -144,9 +144,8 @@ impl<'a> DatParser<'a> { fn eof(&mut self) { self.skip_ws(); - match self.it.next() { - Some((_, ch)) => panic!("expected eof but got {}", ch), - None => {} + if let Some((_, ch)) = self.it.next() { + panic!("expected eof but got {}", ch); } } diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index a79fbebd7..b231586ac 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -110,6 +110,7 @@ where { type Stream = TlsStream; type Error = native_tls::Error; + #[allow(clippy::type_complexity)] type Future = Pin< Box, ChannelBinding), native_tls::Error>> + Send>, >; diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 78f4620e1..ff7c3d7aa 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -132,6 +132,7 @@ where { type Stream = SslStream; type Error = HandshakeError; + #[allow(clippy::type_complexity)] type Future = Pin< Box, ChannelBinding), HandshakeError>> + Send>, >; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 57af33f6c..5686ab9fb 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -133,8 +133,8 @@ where ); match r { Ok(()) => Ok(buf), - Err(frontend::BindError::Conversion(e)) => return Err(Error::to_sql(e, error_idx)), - Err(frontend::BindError::Serialization(e)) => return Err(Error::encode(e)), + Err(frontend::BindError::Conversion(e)) => Err(Error::to_sql(e, error_idx)), + Err(frontend::BindError::Serialization(e)) => Err(Error::encode(e)), } } From 8c28f8b36371a5e7df43c9ca89f5657f4034aa31 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 9 Oct 2019 17:04:24 -0700 Subject: [PATCH 265/819] Overhaul simple_query --- postgres/src/client.rs | 17 +---------------- postgres/src/transaction.rs | 10 +--------- tokio-postgres/src/client.rs | 17 +++++++++++------ tokio-postgres/src/connect.rs | 15 ++++++++++++--- tokio-postgres/src/simple_query.rs | 29 +++++++++++++---------------- tokio-postgres/src/transaction.rs | 12 ++++++------ tokio-postgres/tests/test/main.rs | 1 - 7 files changed, 44 insertions(+), 57 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 97eec69f5..1fa7cc6ff 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -303,22 +303,7 @@ impl Client { /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass /// them to this method! 
pub fn simple_query(&mut self, query: &str) -> Result, Error> { - self.simple_query_iter(query)?.collect() - } - - /// Like `simple_query`, except that it returns a fallible iterator over the resulting values rather than buffering - /// the response in memory. - /// - /// # Warning - /// - /// Prepared statements should be use for any query which contains user-specified data, as they provided the - /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass - /// them to this method! - pub fn simple_query_iter<'a>( - &'a mut self, - query: &'a str, - ) -> Result + 'a, Error> { - Ok(Iter::new(self.0.simple_query(query))) + executor::block_on(self.0.simple_query(query)) } /// Executes a sequence of SQL statements using the simple query protocol. diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 3e509ac95..7066a3e81 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -137,15 +137,7 @@ impl<'a> Transaction<'a> { /// Like `Client::simple_query`. pub fn simple_query(&mut self, query: &str) -> Result, Error> { - self.simple_query_iter(query)?.collect() - } - - /// Like `Client::simple_query_iter`. - pub fn simple_query_iter<'b>( - &'b mut self, - query: &'b str, - ) -> Result + 'b, Error> { - Ok(Iter::new(self.0.simple_query(query))) + executor::block_on(self.0.simple_query(query)) } /// Like `Client::batch_execute`. diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index c7b91f174..d91c25b46 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -3,6 +3,7 @@ use crate::cancel_query; use crate::codec::BackendMessages; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; +use crate::simple_query::SimpleQueryStream; use crate::copy_out::CopyStream; use crate::query::RowStream; use crate::slice_iter; @@ -20,7 +21,7 @@ use crate::{Error, Statement}; use bytes::IntoBuf; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::{future, Stream, TryStream, TryStreamExt}; +use futures::{future, TryStream, TryStreamExt}; use futures::{ready, StreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; @@ -320,11 +321,15 @@ impl Client { /// Prepared statements should be use for any query which contains user-specified data, as they provided the /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! - pub fn simple_query<'a>( - &'a self, - query: &'a str, - ) -> impl Stream> + 'a { - simple_query::simple_query(self.inner(), query) + pub async fn simple_query( + &self, + query: &str, + ) -> Result, Error> { + self.simple_query_raw(query).await?.try_collect().await + } + + pub(crate) async fn simple_query_raw(&self, query: &str) -> Result { + simple_query::simple_query(self.inner(), query).await } /// Executes a sequence of SQL statements using the simple query protocol. 
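With the response now buffered, consuming `simple_query` might look like the following sketch (editorial example, not part of the patch; it assumes a connected `tokio_postgres::Client` named `client` and an existing `foo` table):

    use tokio_postgres::{Client, Error, SimpleQueryMessage};

    async fn list_names(client: &Client) -> Result<(), Error> {
        let messages = client
            .simple_query("SELECT name FROM foo ORDER BY id")
            .await?;
        for message in messages {
            match message {
                // Simple-query rows carry their values as text.
                SimpleQueryMessage::Row(row) => println!("{:?}", row.get(0)),
                SimpleQueryMessage::CommandComplete(n) => println!("{} rows", n),
                // The enum reserves room for future message kinds.
                _ => {}
            }
        }
        Ok(())
    }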
diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 5197200ef..549b5148a 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -4,7 +4,7 @@ use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; -use futures::future; +use futures::{future, Future}; use futures::{FutureExt, Stream}; use pin_utils::pin_mut; use std::io; @@ -50,7 +50,7 @@ where } } - return Err(error.unwrap()); + Err(error.unwrap()) } async fn connect_once( @@ -73,7 +73,16 @@ where let (mut client, mut connection) = connect_raw(socket, tls, config).await?; if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { - let rows = client.simple_query("SHOW transaction_read_only"); + let rows = client.simple_query_raw("SHOW transaction_read_only"); + pin_mut!(rows); + + let rows = future::poll_fn(|cx| { + if connection.poll_unpin(cx)?.is_ready() { + return Poll::Ready(Err(Error::closed())); + } + + rows.as_mut().poll(cx) + }).await?; pin_mut!(rows); loop { diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index d1e3bfd2c..98e0f0e95 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -3,27 +3,24 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use fallible_iterator::FallibleIterator; -use futures::{ready, Stream, TryFutureExt}; +use futures::{ready, Stream}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -pub fn simple_query<'a>( - client: &'a InnerClient, - query: &'a str, -) -> impl Stream> + 'a { - let f = async move { - let buf = encode(query)?; - let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; +pub async fn simple_query( + client: &InnerClient, + query: &str, +) -> Result { + let buf = encode(query)?; + let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - Ok(SimpleQuery { - responses, - columns: None, - }) - }; - f.try_flatten_stream() + Ok(SimpleQueryStream { + responses, + columns: None, + }) } pub async fn batch_execute(client: &InnerClient, query: &str) -> Result<(), Error> { @@ -48,12 +45,12 @@ fn encode(query: &str) -> Result, Error> { Ok(buf) } -struct SimpleQuery { +pub struct SimpleQueryStream { responses: Responses, columns: Option>, } -impl Stream for SimpleQuery { +impl Stream for SimpleQueryStream { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 49d24d505..a8877a466 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -12,7 +12,7 @@ use crate::{ bind, query, slice_iter, Client, Error, Portal, Row, SimpleQueryMessage, Statement, ToStatement, }; use bytes::IntoBuf; -use futures::{Stream, TryStream, TryStreamExt}; +use futures::{TryStream, TryStreamExt}; use postgres_protocol::message::frontend; use std::error; use tokio::io::{AsyncRead, AsyncWrite}; @@ -224,11 +224,11 @@ impl<'a> Transaction<'a> { } /// Like `Client::simple_query`. 
- pub fn simple_query<'b>( - &'b self, - query: &'b str, - ) -> impl Stream> + 'b { - self.client.simple_query(query) + pub async fn simple_query( + &self, + query: &str, + ) -> Result, Error> { + self.client.simple_query(query).await } /// Like `Client::batch_execute`. diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 9069f41e1..c7539cd97 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -264,7 +264,6 @@ async fn simple_query() { INSERT INTO foo (name) VALUES ('steven'), ('joe'); SELECT * FROM foo ORDER BY id;", ) - .try_collect::>() .await .unwrap(); From 31855141d2eac62c0b3fff34a5825602c177693d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 9 Oct 2019 17:45:53 -0700 Subject: [PATCH 266/819] Add query_one --- postgres/src/client.rs | 37 ++++++++++++++++- tokio-postgres/src/client.rs | 66 +++++++++++++++++++++++++++++++ tokio-postgres/src/error/mod.rs | 6 +++ tokio-postgres/src/transaction.rs | 12 ++++++ tokio-postgres/tests/test/main.rs | 16 ++++++++ 5 files changed, 135 insertions(+), 2 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 1fa7cc6ff..36caf9820 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -96,8 +96,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// The `query_iter` method can be used to avoid buffering all rows in memory at once. - /// /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. @@ -125,6 +123,41 @@ impl Client { executor::block_on(self.0.query(query, params)) } + /// Executes a statement which returns a single row, returning it. + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `query` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. + /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, NoTls}; + /// + /// # fn main() -> Result<(), postgres::Error> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let baz = true; + /// let row = client.query_one("SELECT foo FROM bar WHERE baz = $1", &[&baz])?; + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// # Ok(()) + /// # } + /// ``` + pub fn query_one(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement, + { + executor::block_on(self.0.query_one(query, params)) + } + /// A maximally-flexible version of `query`. 
/// /// It takes an iterator of parameters rather than a slice, and returns an iterator of rows rather than collecting diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index d91c25b46..53c8a138c 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -9,6 +9,7 @@ use crate::query::RowStream; use crate::slice_iter; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; +use pin_utils::pin_mut; use crate::tls::TlsConnect; use crate::to_statement::ToStatement; use crate::types::{Oid, ToSql, Type}; @@ -195,6 +196,13 @@ impl Client { /// Executes a statement, returning a vector of the resulting rows. /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. @@ -212,8 +220,52 @@ impl Client { .await } + /// Executes a statement which returns a single row, returning it. + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// + /// Returns an error if the query does not return exactly one row. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. + pub async fn query_one( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result + where + T: ?Sized + ToStatement + { + let stream = self.query_raw(statement, slice_iter(params)).await?; + pin_mut!(stream); + + let row = match stream.try_next().await? { + Some(row) => row, + None => return Err(Error::row_count()), + }; + + if stream.try_next().await?.is_some() { + return Err(Error::row_count()); + } + + Ok(row) + } + /// The maximally flexible version of [`query`]. /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. @@ -231,6 +283,13 @@ impl Client { /// Executes a statement, returning the number of rows modified. /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// /// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned. 
/// /// # Panics @@ -249,6 +308,13 @@ impl Client { /// The maximally flexible version of [`execute`]. /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 199f1b678..695e5923d 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -345,6 +345,7 @@ enum Kind { Authentication, ConfigParse, Config, + RowCount, #[cfg(feature = "runtime")] Connect, } @@ -383,6 +384,7 @@ impl fmt::Display for Error { Kind::Authentication => fmt.write_str("authentication error")?, Kind::ConfigParse => fmt.write_str("invalid connection string")?, Kind::Config => fmt.write_str("invalid configuration")?, + Kind::RowCount => fmt.write_str("query returned an unexpected number of rows")?, #[cfg(feature = "runtime")] Kind::Connect => fmt.write_str("error connecting to server")?, }; @@ -483,6 +485,10 @@ impl Error { Error::new(Kind::Config, Some(e)) } + pub(crate) fn row_count() -> Error { + Error::new(Kind::RowCount, None) + } + #[cfg(feature = "runtime")] pub(crate) fn connect(e: io::Error) -> Error { Error::new(Kind::Connect, Some(Box::new(e))) diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index a8877a466..e8c2a6cf2 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -106,6 +106,18 @@ impl<'a> Transaction<'a> { self.client.query(statement, params).await } + /// Like `Client::query_one`. + pub async fn query_one( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result + where + T: ?Sized + ToStatement, + { + self.client.query_one(statement, params).await + } + /// Like `Client::query_raw`. 
pub async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result where diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index c7539cd97..8c282c6e9 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -642,3 +642,19 @@ async fn check_send() { is_send(&f); drop(f); } + +#[tokio::test] +async fn query_one() { + let client = connect("user=postgres").await; + + client.batch_execute(" + CREATE TEMPORARY TABLE foo ( + name TEXT + ); + INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('carol'); + ").await.unwrap(); + + client.query_one("SELECT * FROM foo WHERE name = 'dave'", &[]).await.err().unwrap(); + client.query_one("SELECT * FROM foo WHERE name = 'alice'", &[]).await.unwrap(); + client.query_one("SELECT * FROM foo", &[]).await.err().unwrap(); +} From 218d88904232ac5cf6a14e608c30dece1f9e7fd6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 9 Oct 2019 19:23:12 -0700 Subject: [PATCH 267/819] Move postgres-derive in-tree --- Cargo.toml | 2 + postgres-derive-test/Cargo.toml | 9 + postgres-derive-test/src/composites.rs | 217 +++++++++++++++++++++++++ postgres-derive-test/src/domains.rs | 121 ++++++++++++++ postgres-derive-test/src/enums.rs | 104 ++++++++++++ postgres-derive-test/src/lib.rs | 27 +++ postgres-derive/Cargo.toml | 18 ++ postgres-derive/src/accepts.rs | 86 ++++++++++ postgres-derive/src/composites.rs | 22 +++ postgres-derive/src/enums.rs | 28 ++++ postgres-derive/src/fromsql.rs | 200 +++++++++++++++++++++++ postgres-derive/src/lib.rs | 28 ++++ postgres-derive/src/overrides.rs | 49 ++++++ postgres-derive/src/tosql.rs | 160 ++++++++++++++++++ postgres-types/Cargo.toml | 2 + postgres-types/src/lib.rs | 3 + 16 files changed, 1076 insertions(+) create mode 100644 postgres-derive-test/Cargo.toml create mode 100644 postgres-derive-test/src/composites.rs create mode 100644 postgres-derive-test/src/domains.rs create mode 100644 postgres-derive-test/src/enums.rs create mode 100644 postgres-derive-test/src/lib.rs create mode 100644 postgres-derive/Cargo.toml create mode 100644 postgres-derive/src/accepts.rs create mode 100644 postgres-derive/src/composites.rs create mode 100644 postgres-derive/src/enums.rs create mode 100644 postgres-derive/src/fromsql.rs create mode 100644 postgres-derive/src/lib.rs create mode 100644 postgres-derive/src/overrides.rs create mode 100644 postgres-derive/src/tosql.rs diff --git a/Cargo.toml b/Cargo.toml index fe3568fa9..4752836a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,8 @@ members = [ "codegen", "postgres", + "postgres-derive", + "postgres-derive-test", "postgres-native-tls", "postgres-openssl", "postgres-protocol", diff --git a/postgres-derive-test/Cargo.toml b/postgres-derive-test/Cargo.toml new file mode 100644 index 000000000..1632660eb --- /dev/null +++ b/postgres-derive-test/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "postgres-derive-test" +version = "0.1.0" +authors = ["Steven Fackler "] +edition = "2018" + +[dependencies] +postgres-types = { path = "../postgres-types", features = ["derive"] } +postgres = { path = "../postgres" } diff --git a/postgres-derive-test/src/composites.rs b/postgres-derive-test/src/composites.rs new file mode 100644 index 000000000..5efd3944c --- /dev/null +++ b/postgres-derive-test/src/composites.rs @@ -0,0 +1,217 @@ +use crate::test_type; +use postgres::{Client, NoTls}; +use postgres_types::{FromSql, ToSql, WrongType}; +use std::error::Error; + +#[test] +fn defaults() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + struct 
InventoryItem { + name: String, + supplier_id: i32, + price: Option, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.\"InventoryItem\" AS ( + name TEXT, + supplier_id INT, + price DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: Some(15.50), + }; + + let item_null = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: None, + }; + + test_type( + &mut conn, + "\"InventoryItem\"", + &[ + (item, "ROW('foobar', 100, 15.50)"), + (item_null, "ROW('foobar', 100, NULL)"), + ], + ); +} + +#[test] +fn name_overrides() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "inventory_item")] + struct InventoryItem { + #[postgres(name = "name")] + _name: String, + #[postgres(name = "supplier_id")] + _supplier_id: i32, + #[postgres(name = "price")] + _price: Option, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier_id INT, + price DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItem { + _name: "foobar".to_owned(), + _supplier_id: 100, + _price: Some(15.50), + }; + + let item_null = InventoryItem { + _name: "foobar".to_owned(), + _supplier_id: 100, + _price: None, + }; + + test_type( + &mut conn, + "inventory_item", + &[ + (item, "ROW('foobar', 100, 15.50)"), + (item_null, "ROW('foobar', 100, NULL)"), + ], + ); +} + +#[test] +fn wrong_name() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + struct InventoryItem { + name: String, + supplier_id: i32, + price: Option, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier_id INT, + price DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: Some(15.50), + }; + + let err = conn + .execute("SELECT $1::inventory_item", &[&item]) + .unwrap_err(); + assert!(err.source().unwrap().is::()); +} + +#[test] +fn extra_field() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "inventory_item")] + struct InventoryItem { + name: String, + supplier_id: i32, + price: Option, + foo: i32, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier_id INT, + price DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: Some(15.50), + foo: 0, + }; + + let err = conn + .execute("SELECT $1::inventory_item", &[&item]) + .unwrap_err(); + assert!(err.source().unwrap().is::()); +} + +#[test] +fn missing_field() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "inventory_item")] + struct InventoryItem { + name: String, + supplier_id: i32, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier_id INT, + price DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + }; + + let err = conn + .execute("SELECT $1::inventory_item", &[&item]) + .unwrap_err(); + assert!(err.source().unwrap().is::()); +} 
+ +#[test] +fn wrong_type() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "inventory_item")] + struct InventoryItem { + name: String, + supplier_id: i32, + price: i32, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + name TEXT, + supplier_id INT, + price DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: 0, + }; + + let err = conn + .execute("SELECT $1::inventory_item", &[&item]) + .unwrap_err(); + assert!(err.source().unwrap().is::()); +} diff --git a/postgres-derive-test/src/domains.rs b/postgres-derive-test/src/domains.rs new file mode 100644 index 000000000..25674f75e --- /dev/null +++ b/postgres-derive-test/src/domains.rs @@ -0,0 +1,121 @@ +use crate::test_type; +use postgres::{Client, NoTls}; +use postgres_types::{FromSql, ToSql, WrongType}; +use std::error::Error; + +#[test] +fn defaults() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + struct SessionId(Vec); + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute( + "CREATE DOMAIN pg_temp.\"SessionId\" AS bytea CHECK(octet_length(VALUE) = 16);", + &[], + ) + .unwrap(); + + test_type( + &mut conn, + "\"SessionId\"", + &[( + SessionId(b"0123456789abcdef".to_vec()), + "'0123456789abcdef'", + )], + ); +} + +#[test] +fn name_overrides() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "session_id")] + struct SessionId(Vec); + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute( + "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16);", + &[], + ) + .unwrap(); + + test_type( + &mut conn, + "session_id", + &[( + SessionId(b"0123456789abcdef".to_vec()), + "'0123456789abcdef'", + )], + ); +} + +#[test] +fn wrong_name() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + struct SessionId(Vec); + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute( + "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16);", + &[], + ) + .unwrap(); + + let err = conn + .execute("SELECT $1::session_id", &[&SessionId(vec![])]) + .unwrap_err(); + assert!(err.source().unwrap().is::()); +} + +#[test] +fn wrong_type() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "session_id")] + struct SessionId(i32); + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute( + "CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16);", + &[], + ) + .unwrap(); + + let err = conn + .execute("SELECT $1::session_id", &[&SessionId(0)]) + .unwrap_err(); + assert!(err.source().unwrap().is::()); +} + +#[test] +fn domain_in_composite() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "domain")] + struct Domain(String); + + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "composite")] + struct Composite { + domain: Domain, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + " + CREATE DOMAIN pg_temp.domain AS TEXT;\ + CREATE TYPE pg_temp.composite AS ( + domain domain + ); + ", + ) + .unwrap(); + + test_type( + &mut conn, + "composite", + &[( + Composite { + domain: Domain("hello".to_string()), + }, + "ROW('hello')", + )], + ); +} diff --git 
a/postgres-derive-test/src/enums.rs b/postgres-derive-test/src/enums.rs new file mode 100644 index 000000000..a7039ca05 --- /dev/null +++ b/postgres-derive-test/src/enums.rs @@ -0,0 +1,104 @@ +use crate::test_type; +use postgres::{Client, NoTls}; +use postgres_types::{FromSql, ToSql, WrongType}; +use std::error::Error; + +#[test] +fn defaults() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + enum Foo { + Bar, + Baz, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.\"Foo\" AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + test_type( + &mut conn, + "\"Foo\"", + &[(Foo::Bar, "'Bar'"), (Foo::Baz, "'Baz'")], + ); +} + +#[test] +fn name_overrides() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(name = "mood")] + enum Mood { + #[postgres(name = "sad")] + Sad, + #[postgres(name = "ok")] + Ok, + #[postgres(name = "happy")] + Happy, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute( + "CREATE TYPE pg_temp.mood AS ENUM ('sad', 'ok', 'happy')", + &[], + ) + .unwrap(); + + test_type( + &mut conn, + "mood", + &[ + (Mood::Sad, "'sad'"), + (Mood::Ok, "'ok'"), + (Mood::Happy, "'happy'"), + ], + ); +} + +#[test] +fn wrong_name() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + enum Foo { + Bar, + Baz, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.foo AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let err = conn.execute("SELECT $1::foo", &[&Foo::Bar]).unwrap_err(); + assert!(err.source().unwrap().is::()); +} + +#[test] +fn extra_variant() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(name = "foo")] + enum Foo { + Bar, + Baz, + Buz, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.foo AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let err = conn.execute("SELECT $1::foo", &[&Foo::Bar]).unwrap_err(); + assert!(err.source().unwrap().is::()); +} + +#[test] +fn missing_variant() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(name = "foo")] + enum Foo { + Bar, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.foo AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let err = conn.execute("SELECT $1::foo", &[&Foo::Bar]).unwrap_err(); + assert!(err.source().unwrap().is::()); +} diff --git a/postgres-derive-test/src/lib.rs b/postgres-derive-test/src/lib.rs new file mode 100644 index 000000000..0bf429e38 --- /dev/null +++ b/postgres-derive-test/src/lib.rs @@ -0,0 +1,27 @@ +#![cfg(test)] + +use postgres::Client; +use postgres_types::{FromSqlOwned, ToSql}; +use std::fmt; + +mod composites; +mod domains; +mod enums; + +pub fn test_type(conn: &mut Client, sql_type: &str, checks: &[(T, S)]) +where + T: PartialEq + FromSqlOwned + ToSql + Sync, + S: fmt::Display, +{ + for &(ref val, ref repr) in checks.iter() { + let stmt = conn + .prepare(&*format!("SELECT {}::{}", *repr, sql_type)) + .unwrap(); + let result = conn.query_one(&stmt, &[]).unwrap().get(0); + assert_eq!(val, &result); + + let stmt = conn.prepare(&*format!("SELECT $1::{}", sql_type)).unwrap(); + let result = conn.query_one(&stmt, &[val]).unwrap().get(0); + assert_eq!(val, &result); + } +} diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml new file mode 100644 index 000000000..3dea8d98d --- /dev/null +++ 
b/postgres-derive/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "postgres-derive" +version = "0.3.3" +authors = ["Steven Fackler "] +license = "MIT/Apache-2.0" +description = "Deriving plugin support for Postgres enum, domain, and composite types" +repository = "https://github.com/sfackler/rust-postgres-derive" +readme = "README.md" +keywords = ["database", "postgres", "postgresql", "sql"] + +[lib] +proc-macro = true +test = false + +[dependencies] +syn = "1.0" +proc-macro2 = "1.0" +quote = "1.0" diff --git a/postgres-derive/src/accepts.rs b/postgres-derive/src/accepts.rs new file mode 100644 index 000000000..a7b8c6b5e --- /dev/null +++ b/postgres-derive/src/accepts.rs @@ -0,0 +1,86 @@ +use proc_macro2::{Span, TokenStream}; +use std::iter; +use syn::Ident; + +use composites::Field; +use enums::Variant; + +pub fn domain_body(name: &str, field: &syn::Field) -> TokenStream { + let ty = &field.ty; + + quote! { + if type_.name() != #name { + return false; + } + + match *type_.kind() { + ::postgres_types::Kind::Domain(ref type_) => { + <#ty as ::postgres_types::ToSql>::accepts(type_) + } + _ => false, + } + } +} + +pub fn enum_body(name: &str, variants: &[Variant]) -> TokenStream { + let num_variants = variants.len(); + let variant_names = variants.iter().map(|v| &v.name); + + quote! { + if type_.name() != #name { + return false; + } + + match *type_.kind() { + ::postgres_types::Kind::Enum(ref variants) => { + if variants.len() != #num_variants { + return false; + } + + variants.iter().all(|v| { + match &**v { + #( + #variant_names => true, + )* + _ => false, + } + }) + } + _ => false, + } + } +} + +pub fn composite_body(name: &str, trait_: &str, fields: &[Field]) -> TokenStream { + let num_fields = fields.len(); + let trait_ = Ident::new(trait_, Span::call_site()); + let traits = iter::repeat(&trait_); + let field_names = fields.iter().map(|f| &f.name); + let field_types = fields.iter().map(|f| &f.type_); + + quote! 
{ + if type_.name() != #name { + return false; + } + + match *type_.kind() { + ::postgres_types::Kind::Composite(ref fields) => { + if fields.len() != #num_fields { + return false; + } + + fields.iter().all(|f| { + match f.name() { + #( + #field_names => { + <#field_types as ::postgres_types::#traits>::accepts(f.type_()) + } + )* + _ => false, + } + }) + } + _ => false, + } + } +} diff --git a/postgres-derive/src/composites.rs b/postgres-derive/src/composites.rs new file mode 100644 index 000000000..118d5c349 --- /dev/null +++ b/postgres-derive/src/composites.rs @@ -0,0 +1,22 @@ +use syn::{self, Error, Ident, Type}; + +use overrides::Overrides; + +pub struct Field { + pub name: String, + pub ident: Ident, + pub type_: Type, +} + +impl Field { + pub fn parse(raw: &syn::Field) -> Result<Field, Error> { + let overrides = Overrides::extract(&raw.attrs)?; + + let ident = raw.ident.as_ref().unwrap().clone(); + Ok(Field { + name: overrides.name.unwrap_or_else(|| ident.to_string()), + ident, + type_: raw.ty.clone(), + }) + } +} diff --git a/postgres-derive/src/enums.rs b/postgres-derive/src/enums.rs new file mode 100644 index 000000000..6dc5fb0d3 --- /dev/null +++ b/postgres-derive/src/enums.rs @@ -0,0 +1,28 @@ +use syn::{self, Error, Fields, Ident}; + +use overrides::Overrides; + +pub struct Variant { + pub ident: Ident, + pub name: String, +} + +impl Variant { + pub fn parse(raw: &syn::Variant) -> Result<Variant, Error> { + match raw.fields { + Fields::Unit => {} + _ => { + return Err(Error::new_spanned( + raw, + "non-C-like enums are not supported", + )) + } + } + + let overrides = Overrides::extract(&raw.attrs)?; + Ok(Variant { + ident: raw.ident.clone(), + name: overrides.name.unwrap_or_else(|| raw.ident.to_string()), + }) + } +} diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs new file mode 100644 index 000000000..07359b398 --- /dev/null +++ b/postgres-derive/src/fromsql.rs @@ -0,0 +1,200 @@ +use proc_macro2::{Span, TokenStream}; +use std::iter; +use syn::{self, Data, DataStruct, DeriveInput, Error, Fields, Ident}; + +use accepts; +use composites::Field; +use enums::Variant; +use overrides::Overrides; + +pub fn expand_derive_fromsql(input: DeriveInput) -> Result<TokenStream, Error> { + let overrides = Overrides::extract(&input.attrs)?; + + let name = overrides.name.unwrap_or_else(|| input.ident.to_string()); + + let (accepts_body, to_sql_body) = match input.data { + Data::Enum(ref data) => { + let variants = data + .variants + .iter() + .map(Variant::parse) + .collect::<Result<Vec<_>, _>>()?; + ( + accepts::enum_body(&name, &variants), + enum_body(&input.ident, &variants), + ) + } + Data::Struct(DataStruct { + fields: Fields::Unnamed(ref fields), + .. + }) if fields.unnamed.len() == 1 => { + let field = fields.unnamed.first().unwrap(); + ( + domain_accepts_body(&name, field), + domain_body(&input.ident, field), + ) + } + Data::Struct(DataStruct { + fields: Fields::Named(ref fields), + .. + }) => { + let fields = fields + .named + .iter() + .map(Field::parse) + .collect::<Result<Vec<_>, _>>()?; + ( + accepts::composite_body(&name, "FromSql", &fields), + composite_body(&input.ident, &fields), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums", + )) + } + }; + + let ident = &input.ident; + let out = quote!
{ + impl<'a> ::postgres_types::FromSql<'a> for #ident { + fn from_sql(_type: &::postgres_types::Type, buf: &'a [u8]) + -> ::std::result::Result<#ident, + ::std::boxed::Box<::std::error::Error + + ::std::marker::Sync + + ::std::marker::Send>> { + #to_sql_body + } + + fn accepts(type_: &::postgres_types::Type) -> bool { + #accepts_body + } + } + }; + + Ok(out) +} + +fn enum_body(ident: &Ident, variants: &[Variant]) -> TokenStream { + let variant_names = variants.iter().map(|v| &v.name); + let idents = iter::repeat(ident); + let variant_idents = variants.iter().map(|v| &v.ident); + + quote! { + match ::std::str::from_utf8(buf)? { + #( + #variant_names => ::std::result::Result::Ok(#idents::#variant_idents), + )* + s => { + ::std::result::Result::Err( + ::std::convert::Into::into(format!("invalid variant `{}`", s))) + } + } + } +} + +// Domains are sometimes but not always just represented by the bare type (!?) +fn domain_accepts_body(name: &str, field: &syn::Field) -> TokenStream { + let ty = &field.ty; + let normal_body = accepts::domain_body(name, field); + + quote! { + if <#ty as ::postgres_types::FromSql>::accepts(type_) { + return true; + } + + #normal_body + } +} + +fn domain_body(ident: &Ident, field: &syn::Field) -> TokenStream { + let ty = &field.ty; + quote! { + <#ty as ::postgres_types::FromSql>::from_sql(_type, buf).map(#ident) + } +} + +fn composite_body(ident: &Ident, fields: &[Field]) -> TokenStream { + let temp_vars = &fields + .iter() + .map(|f| Ident::new(&format!("__{}", f.ident), Span::call_site())) + .collect::<Vec<_>>(); + let field_names = &fields.iter().map(|f| &f.name).collect::<Vec<_>>(); + let field_idents = &fields.iter().map(|f| &f.ident).collect::<Vec<_>>(); + + quote! { + fn read_be_i32(buf: &mut &[u8]) -> ::std::io::Result<i32> { + let mut bytes = [0; 4]; + ::std::io::Read::read_exact(buf, &mut bytes)?; + let num = ((bytes[0] as i32) << 24) | + ((bytes[1] as i32) << 16) | + ((bytes[2] as i32) << 8) | + (bytes[3] as i32); + ::std::result::Result::Ok(num) + } + + fn read_value<'a, T>(type_: &::postgres_types::Type, + buf: &mut &'a [u8]) + -> ::std::result::Result<T, + ::std::boxed::Box<::std::error::Error + + ::std::marker::Sync + + ::std::marker::Send>> + where T: ::postgres_types::FromSql<'a> + { + let len = read_be_i32(buf)?; + let value = if len < 0 { + ::std::option::Option::None + } else { + if len as usize > buf.len() { + return ::std::result::Result::Err( + ::std::convert::Into::into("invalid buffer size")); + } + let (head, tail) = buf.split_at(len as usize); + *buf = tail; + ::std::option::Option::Some(&head[..]) + }; + ::postgres_types::FromSql::from_sql_nullable(type_, value) + } + + let fields = match *_type.kind() { + ::postgres_types::Kind::Composite(ref fields) => fields, + _ => unreachable!(), + }; + + let mut buf = buf; + let num_fields = read_be_i32(&mut buf)?; + if num_fields as usize != fields.len() { + return ::std::result::Result::Err( + ::std::convert::Into::into(format!("invalid field count: {} vs {}", num_fields, + fields.len()))); + } + + #( + let mut #temp_vars = ::std::option::Option::None; + )* + + for field in fields { + let oid = read_be_i32(&mut buf)?
as u32; + if oid != field.type_().oid() { + return ::std::result::Result::Err(::std::convert::Into::into("unexpected OID")); + } + + match field.name() { + #( + #field_names => { + #temp_vars = ::std::option::Option::Some( + read_value(field.type_(), &mut buf)?); + } + )* + _ => unreachable!(), + } + } + + ::std::result::Result::Ok(#ident { + #( + #field_idents: #temp_vars.unwrap(), + )* + }) + } +} diff --git a/postgres-derive/src/lib.rs b/postgres-derive/src/lib.rs new file mode 100644 index 000000000..ff29e7cc9 --- /dev/null +++ b/postgres-derive/src/lib.rs @@ -0,0 +1,28 @@ +#![recursion_limit = "256"] + +extern crate proc_macro; +extern crate syn; +#[macro_use] +extern crate quote; +extern crate proc_macro2; + +use proc_macro::TokenStream; + +mod accepts; +mod composites; +mod enums; +mod fromsql; +mod overrides; +mod tosql; + +#[proc_macro_derive(ToSql, attributes(postgres))] +pub fn derive_tosql(input: TokenStream) -> TokenStream { + let input = syn::parse(input).unwrap(); + tosql::expand_derive_tosql(input).unwrap().into() +} + +#[proc_macro_derive(FromSql, attributes(postgres))] +pub fn derive_fromsql(input: TokenStream) -> TokenStream { + let input = syn::parse(input).unwrap(); + fromsql::expand_derive_fromsql(input).unwrap().into() +} diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs new file mode 100644 index 000000000..08e6f3a77 --- /dev/null +++ b/postgres-derive/src/overrides.rs @@ -0,0 +1,49 @@ +use syn::{Attribute, Error, Lit, Meta, NestedMeta}; + +pub struct Overrides { + pub name: Option<String>, +} + +impl Overrides { + pub fn extract(attrs: &[Attribute]) -> Result<Overrides, Error> { + let mut overrides = Overrides { name: None }; + + for attr in attrs { + let attr = match attr.parse_meta() { + Ok(meta) => meta, + Err(_) => continue, + }; + + if !attr.path().is_ident("postgres") { + continue; + } + + let list = match attr { + Meta::List(ref list) => list, + bad => return Err(Error::new_spanned(bad, "expected a #[postgres(...)]")), + }; + + for item in &list.nested { + match item { + NestedMeta::Meta(Meta::NameValue(meta)) => { + if !meta.path.is_ident("name") { + return Err(Error::new_spanned(&meta.path, "unknown override")); + } + + let value = match &meta.lit { + Lit::Str(s) => s.value(), + bad => { + return Err(Error::new_spanned(bad, "expected a string literal")) + } + }; + + overrides.name = Some(value); + } + bad => return Err(Error::new_spanned(bad, "expected a name-value meta item")), + } + } + } + + Ok(overrides) + } +} diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs new file mode 100644 index 000000000..8cefc782c --- /dev/null +++ b/postgres-derive/src/tosql.rs @@ -0,0 +1,160 @@ +use std::iter; +use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; + +use accepts; +use composites::Field; +use enums::Variant; +use overrides::Overrides; +use proc_macro2::TokenStream; + +pub fn expand_derive_tosql(input: DeriveInput) -> Result<TokenStream, Error> { + let overrides = Overrides::extract(&input.attrs)?; + + let name = overrides.name.unwrap_or_else(|| input.ident.to_string()); + + let (accepts_body, to_sql_body) = match input.data { + Data::Enum(ref data) => { + let variants = data + .variants + .iter() + .map(Variant::parse) + .collect::<Result<Vec<_>, _>>()?; + ( + accepts::enum_body(&name, &variants), + enum_body(&input.ident, &variants), + ) + } + Data::Struct(DataStruct { + fields: Fields::Unnamed(ref fields), + ..
+ }) if fields.unnamed.len() == 1 => { + let field = fields.unnamed.first().unwrap(); + (accepts::domain_body(&name, &field), domain_body()) + } + Data::Struct(DataStruct { + fields: Fields::Named(ref fields), + .. + }) => { + let fields = fields + .named + .iter() + .map(Field::parse) + .collect::, _>>()?; + ( + accepts::composite_body(&name, "ToSql", &fields), + composite_body(&fields), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums", + )); + } + }; + + let ident = &input.ident; + let out = quote! { + impl ::postgres_types::ToSql for #ident { + fn to_sql(&self, + _type: &::postgres_types::Type, + buf: &mut ::std::vec::Vec) + -> ::std::result::Result<::postgres_types::IsNull, + ::std::boxed::Box<::std::error::Error + + ::std::marker::Sync + + ::std::marker::Send>> { + #to_sql_body + } + + fn accepts(type_: &::postgres_types::Type) -> bool { + #accepts_body + } + + ::postgres_types::to_sql_checked!(); + } + }; + + Ok(out) +} + +fn enum_body(ident: &Ident, variants: &[Variant]) -> TokenStream { + let idents = iter::repeat(ident); + let variant_idents = variants.iter().map(|v| &v.ident); + let variant_names = variants.iter().map(|v| &v.name); + + quote! { + let s = match *self { + #( + #idents::#variant_idents => #variant_names, + )* + }; + + buf.extend_from_slice(s.as_bytes()); + ::std::result::Result::Ok(::postgres_types::IsNull::No) + } +} + +fn domain_body() -> TokenStream { + quote! { + let type_ = match *_type.kind() { + ::postgres_types::Kind::Domain(ref type_) => type_, + _ => unreachable!(), + }; + + ::postgres_types::ToSql::to_sql(&self.0, type_, buf) + } +} + +fn composite_body(fields: &[Field]) -> TokenStream { + let field_names = fields.iter().map(|f| &f.name); + let field_idents = fields.iter().map(|f| &f.ident); + + quote! { + fn write_be_i32(buf: &mut W, n: i32) -> ::std::io::Result<()> + where W: ::std::io::Write + { + let be = [(n >> 24) as u8, (n >> 16) as u8, (n >> 8) as u8, n as u8]; + buf.write_all(&be) + } + + let fields = match *_type.kind() { + ::postgres_types::Kind::Composite(ref fields) => fields, + _ => unreachable!(), + }; + + write_be_i32(buf, fields.len() as i32)?; + + for field in fields { + write_be_i32(buf, field.type_().oid() as i32)?; + + let base = buf.len(); + write_be_i32(buf, 0)?; + let r = match field.name() { + #( + #field_names => { + ::postgres_types::ToSql::to_sql(&self.#field_idents, + field.type_(), + buf) + } + )* + _ => unreachable!(), + }; + + let count = match r? 
{ + ::postgres_types::IsNull::Yes => -1, + ::postgres_types::IsNull::No => { + let len = buf.len() - base - 4; + if len > i32::max_value() as usize { + return ::std::result::Result::Err( + ::std::convert::Into::into("value too large to transmit")); + } + len as i32 + } + }; + + write_be_i32(&mut &mut buf[base..base + 4], count)?; + } + + ::std::result::Result::Ok(::postgres_types::IsNull::No) + } +} diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index a3c762a5c..68621724b 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Steven Fackler "] edition = "2018" [features] +"derive" = ["postgres-derive"] "with-bit-vec-0_6" = ["bit-vec-06"] "with-chrono-0_4" = ["chrono-04"] "with-eui48-0_4" = ["eui48-04"] @@ -15,6 +16,7 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] [dependencies] fallible-iterator = "0.2" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } +postgres-derive = { version = "0.3.3", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 745b4a85e..3bb852d76 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -17,6 +17,9 @@ use std::net::IpAddr; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +#[cfg(feature = "derive")] +pub use postgres_derive::{FromSql, ToSql}; + #[cfg(feature = "with-serde_json-1")] pub use crate::serde_json_1::Json; use crate::type_gen::{Inner, Other}; From e69f1583c8a866e57f3c9147675f2f80c8c7d215 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 10 Oct 2019 15:21:45 -0700 Subject: [PATCH 268/819] cfail tests for derive --- postgres-derive-test/Cargo.toml | 4 ++- .../src/compile-fail/invalid-types.rs | 25 +++++++++++++ .../src/compile-fail/invalid-types.stderr | 35 +++++++++++++++++++ .../src/compile-fail/unknown-override.rs | 15 ++++++++ .../src/compile-fail/unknown-override.stderr | 11 ++++++ postgres-derive-test/src/lib.rs | 5 +++ postgres-derive/src/fromsql.rs | 2 +- postgres-derive/src/lib.rs | 8 +++-- 8 files changed, 101 insertions(+), 4 deletions(-) create mode 100644 postgres-derive-test/src/compile-fail/invalid-types.rs create mode 100644 postgres-derive-test/src/compile-fail/invalid-types.stderr create mode 100644 postgres-derive-test/src/compile-fail/unknown-override.rs create mode 100644 postgres-derive-test/src/compile-fail/unknown-override.stderr diff --git a/postgres-derive-test/Cargo.toml b/postgres-derive-test/Cargo.toml index 1632660eb..24fd1614f 100644 --- a/postgres-derive-test/Cargo.toml +++ b/postgres-derive-test/Cargo.toml @@ -4,6 +4,8 @@ version = "0.1.0" authors = ["Steven Fackler "] edition = "2018" -[dependencies] +[dev-dependencies] +trybuild = "1.0" + postgres-types = { path = "../postgres-types", features = ["derive"] } postgres = { path = "../postgres" } diff --git a/postgres-derive-test/src/compile-fail/invalid-types.rs b/postgres-derive-test/src/compile-fail/invalid-types.rs new file mode 100644 index 000000000..ef41ac820 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-types.rs @@ -0,0 +1,25 @@ +use postgres_types::{FromSql, ToSql}; + +#[derive(ToSql)] +struct ToSqlUnit; + +#[derive(FromSql)] +struct FromSqlUnit; + +#[derive(ToSql)] +struct ToSqlTuple(i32, i32); + +#[derive(FromSql)] +struct FromSqlTuple(i32, i32); + +#[derive(ToSql)] +enum ToSqlEnum { + Foo(i32), +} + 
+#[derive(FromSql)] +enum FromSqlEnum { + Foo(i32), +} + +fn main() {} diff --git a/postgres-derive-test/src/compile-fail/invalid-types.stderr b/postgres-derive-test/src/compile-fail/invalid-types.stderr new file mode 100644 index 000000000..9b563d58b --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-types.stderr @@ -0,0 +1,35 @@ +error: #[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums + --> $DIR/invalid-types.rs:4:1 + | +4 | struct ToSqlUnit; + | ^^^^^^^^^^^^^^^^^ + +error: #[derive(FromSql)] may only be applied to structs, single field tuple structs, and enums + --> $DIR/invalid-types.rs:7:1 + | +7 | struct FromSqlUnit; + | ^^^^^^^^^^^^^^^^^^^ + +error: #[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums + --> $DIR/invalid-types.rs:10:1 + | +10 | struct ToSqlTuple(i32, i32); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: #[derive(FromSql)] may only be applied to structs, single field tuple structs, and enums + --> $DIR/invalid-types.rs:13:1 + | +13 | struct FromSqlTuple(i32, i32); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: non-C-like enums are not supported + --> $DIR/invalid-types.rs:17:5 + | +17 | Foo(i32), + | ^^^^^^^^ + +error: non-C-like enums are not supported + --> $DIR/invalid-types.rs:22:5 + | +22 | Foo(i32), + | ^^^^^^^^ diff --git a/postgres-derive-test/src/compile-fail/unknown-override.rs b/postgres-derive-test/src/compile-fail/unknown-override.rs new file mode 100644 index 000000000..e4fffd540 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/unknown-override.rs @@ -0,0 +1,15 @@ +use postgres_types::{FromSql, ToSql}; + +#[derive(FromSql)] +#[postgres(foo = "bar")] +struct Foo { + a: i32, +} + +#[derive(ToSql)] +#[postgres(foo = "bar")] +struct Bar { + a: i32, +} + +fn main() {} diff --git a/postgres-derive-test/src/compile-fail/unknown-override.stderr b/postgres-derive-test/src/compile-fail/unknown-override.stderr new file mode 100644 index 000000000..b7719e3c2 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/unknown-override.stderr @@ -0,0 +1,11 @@ +error: unknown override + --> $DIR/unknown-override.rs:4:12 + | +4 | #[postgres(foo = "bar")] + | ^^^ + +error: unknown override + --> $DIR/unknown-override.rs:10:12 + | +10 | #[postgres(foo = "bar")] + | ^^^ diff --git a/postgres-derive-test/src/lib.rs b/postgres-derive-test/src/lib.rs index 0bf429e38..7da75af8f 100644 --- a/postgres-derive-test/src/lib.rs +++ b/postgres-derive-test/src/lib.rs @@ -25,3 +25,8 @@ where assert_eq!(val, &result); } } + +#[test] +fn compile_fail() { + trybuild::TestCases::new().compile_fail("src/compile-fail/*.rs"); +} diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index 07359b398..c4a2447f8 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -51,7 +51,7 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { _ => { return Err(Error::new_spanned( input, - "#[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums", + "#[derive(FromSql)] may only be applied to structs, single field tuple structs, and enums", )) } }; diff --git a/postgres-derive/src/lib.rs b/postgres-derive/src/lib.rs index ff29e7cc9..252526382 100644 --- a/postgres-derive/src/lib.rs +++ b/postgres-derive/src/lib.rs @@ -18,11 +18,15 @@ mod tosql; #[proc_macro_derive(ToSql, attributes(postgres))] pub fn derive_tosql(input: TokenStream) -> TokenStream { let input = syn::parse(input).unwrap(); - 
tosql::expand_derive_tosql(input).unwrap().into() + tosql::expand_derive_tosql(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } #[proc_macro_derive(FromSql, attributes(postgres))] pub fn derive_fromsql(input: TokenStream) -> TokenStream { let input = syn::parse(input).unwrap(); - fromsql::expand_derive_fromsql(input).unwrap().into() + fromsql::expand_derive_fromsql(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } From e62fe3d7c34bfaa79074a8bb3deeb4f8c81ed9b1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 10 Oct 2019 15:23:53 -0700 Subject: [PATCH 269/819] Migrate postgres-derive to 2018 --- postgres-derive/Cargo.toml | 3 ++- postgres-derive/src/accepts.rs | 5 +++-- postgres-derive/src/composites.rs | 4 ++-- postgres-derive/src/enums.rs | 4 ++-- postgres-derive/src/fromsql.rs | 11 ++++++----- postgres-derive/src/lib.rs | 5 ----- postgres-derive/src/tosql.rs | 11 ++++++----- 7 files changed, 21 insertions(+), 22 deletions(-) diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 3dea8d98d..9121e8ee4 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -3,8 +3,9 @@ name = "postgres-derive" version = "0.3.3" authors = ["Steven Fackler "] license = "MIT/Apache-2.0" +edition = "2018" description = "Deriving plugin support for Postgres enum, domain, and composite types" -repository = "https://github.com/sfackler/rust-postgres-derive" +repository = "https://github.com/sfackler/rust-postgres" readme = "README.md" keywords = ["database", "postgres", "postgresql", "sql"] diff --git a/postgres-derive/src/accepts.rs b/postgres-derive/src/accepts.rs index a7b8c6b5e..530badd0b 100644 --- a/postgres-derive/src/accepts.rs +++ b/postgres-derive/src/accepts.rs @@ -1,9 +1,10 @@ use proc_macro2::{Span, TokenStream}; +use quote::quote; use std::iter; use syn::Ident; -use composites::Field; -use enums::Variant; +use crate::composites::Field; +use crate::enums::Variant; pub fn domain_body(name: &str, field: &syn::Field) -> TokenStream { let ty = &field.ty; diff --git a/postgres-derive/src/composites.rs b/postgres-derive/src/composites.rs index 118d5c349..f5599d375 100644 --- a/postgres-derive/src/composites.rs +++ b/postgres-derive/src/composites.rs @@ -1,6 +1,6 @@ -use syn::{self, Error, Ident, Type}; +use syn::{Error, Ident, Type}; -use overrides::Overrides; +use crate::overrides::Overrides; pub struct Field { pub name: String, diff --git a/postgres-derive/src/enums.rs b/postgres-derive/src/enums.rs index 6dc5fb0d3..3c6bc7113 100644 --- a/postgres-derive/src/enums.rs +++ b/postgres-derive/src/enums.rs @@ -1,6 +1,6 @@ -use syn::{self, Error, Fields, Ident}; +use syn::{Error, Fields, Ident}; -use overrides::Overrides; +use crate::overrides::Overrides; pub struct Variant { pub ident: Ident, diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index c4a2447f8..7c851396c 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -1,11 +1,12 @@ use proc_macro2::{Span, TokenStream}; +use quote::quote; use std::iter; -use syn::{self, Data, DataStruct, DeriveInput, Error, Fields, Ident}; +use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; -use accepts; -use composites::Field; -use enums::Variant; -use overrides::Overrides; +use crate::accepts; +use crate::composites::Field; +use crate::enums::Variant; +use crate::overrides::Overrides; pub fn expand_derive_fromsql(input: DeriveInput) -> Result { let overrides = Overrides::extract(&input.attrs)?; diff --git 
a/postgres-derive/src/lib.rs b/postgres-derive/src/lib.rs index 252526382..9ca8ec20b 100644 --- a/postgres-derive/src/lib.rs +++ b/postgres-derive/src/lib.rs @@ -1,10 +1,5 @@ #![recursion_limit = "256"] - extern crate proc_macro; -extern crate syn; -#[macro_use] -extern crate quote; -extern crate proc_macro2; use proc_macro::TokenStream; diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index 8cefc782c..dda59581c 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -1,11 +1,12 @@ +use proc_macro2::TokenStream; +use quote::quote; use std::iter; use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; -use accepts; -use composites::Field; -use enums::Variant; -use overrides::Overrides; -use proc_macro2::TokenStream; +use crate::accepts; +use crate::composites::Field; +use crate::enums::Variant; +use crate::overrides::Overrides; pub fn expand_derive_tosql(input: DeriveInput) -> Result { let overrides = Overrides::extract(&input.attrs)?; From 01cc7e471578d32ba2c4de90e393f459107f16bf Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 10 Oct 2019 16:03:48 -0700 Subject: [PATCH 270/819] Clean up derived code --- postgres-derive/src/fromsql.rs | 80 +++++++++--------------------- postgres-derive/src/tosql.rs | 57 +++++++++------------ postgres-types/src/lib.rs | 3 ++ postgres-types/src/private.rs | 33 ++++++++++++ tokio-postgres/src/client.rs | 11 ++-- tokio-postgres/src/connect.rs | 3 +- tokio-postgres/src/simple_query.rs | 5 +- tokio-postgres/src/transaction.rs | 9 ++-- tokio-postgres/tests/test/main.rs | 26 ++++++++-- 9 files changed, 113 insertions(+), 114 deletions(-) create mode 100644 postgres-types/src/private.rs diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index 7c851396c..e1ab6ffa7 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -59,16 +59,16 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { let ident = &input.ident; let out = quote! { - impl<'a> ::postgres_types::FromSql<'a> for #ident { - fn from_sql(_type: &::postgres_types::Type, buf: &'a [u8]) - -> ::std::result::Result<#ident, - ::std::boxed::Box<::std::error::Error + - ::std::marker::Sync + - ::std::marker::Send>> { + impl<'a> postgres_types::FromSql<'a> for #ident { + fn from_sql(_type: &postgres_types::Type, buf: &'a [u8]) + -> std::result::Result<#ident, + std::boxed::Box> { #to_sql_body } - fn accepts(type_: &::postgres_types::Type) -> bool { + fn accepts(type_: &postgres_types::Type) -> bool { #accepts_body } } @@ -83,13 +83,13 @@ fn enum_body(ident: &Ident, variants: &[Variant]) -> TokenStream { let variant_idents = variants.iter().map(|v| &v.ident); quote! { - match ::std::str::from_utf8(buf)? { + match std::str::from_utf8(buf)? { #( - #variant_names => ::std::result::Result::Ok(#idents::#variant_idents), + #variant_names => std::result::Result::Ok(#idents::#variant_idents), )* s => { - ::std::result::Result::Err( - ::std::convert::Into::into(format!("invalid variant `{}`", s))) + std::result::Result::Err( + std::convert::Into::into(format!("invalid variant `{}`", s))) } } } @@ -101,7 +101,7 @@ fn domain_accepts_body(name: &str, field: &syn::Field) -> TokenStream { let normal_body = accepts::domain_body(name, field); quote! 
{ - if <#ty as ::postgres_types::FromSql>::accepts(type_) { + if <#ty as postgres_types::FromSql>::accepts(type_) { return true; } @@ -112,7 +112,7 @@ fn domain_accepts_body(name: &str, field: &syn::Field) -> TokenStream { fn domain_body(ident: &Ident, field: &syn::Field) -> TokenStream { let ty = &field.ty; quote! { - <#ty as ::postgres_types::FromSql>::from_sql(_type, buf).map(#ident) + <#ty as postgres_types::FromSql>::from_sql(_type, buf).map(#ident) } } @@ -125,74 +125,40 @@ fn composite_body(ident: &Ident, fields: &[Field]) -> TokenStream { let field_idents = &fields.iter().map(|f| &f.ident).collect::>(); quote! { - fn read_be_i32(buf: &mut &[u8]) -> ::std::io::Result { - let mut bytes = [0; 4]; - ::std::io::Read::read_exact(buf, &mut bytes)?; - let num = ((bytes[0] as i32) << 24) | - ((bytes[1] as i32) << 16) | - ((bytes[2] as i32) << 8) | - (bytes[3] as i32); - ::std::result::Result::Ok(num) - } - - fn read_value<'a, T>(type_: &::postgres_types::Type, - buf: &mut &'a [u8]) - -> ::std::result::Result> - where T: ::postgres_types::FromSql<'a> - { - let len = read_be_i32(buf)?; - let value = if len < 0 { - ::std::option::Option::None - } else { - if len as usize > buf.len() { - return ::std::result::Result::Err( - ::std::convert::Into::into("invalid buffer size")); - } - let (head, tail) = buf.split_at(len as usize); - *buf = tail; - ::std::option::Option::Some(&head[..]) - }; - ::postgres_types::FromSql::from_sql_nullable(type_, value) - } - let fields = match *_type.kind() { - ::postgres_types::Kind::Composite(ref fields) => fields, + postgres_types::Kind::Composite(ref fields) => fields, _ => unreachable!(), }; let mut buf = buf; - let num_fields = read_be_i32(&mut buf)?; + let num_fields = postgres_types::private::read_be_i32(&mut buf)?; if num_fields as usize != fields.len() { - return ::std::result::Result::Err( - ::std::convert::Into::into(format!("invalid field count: {} vs {}", num_fields, - fields.len()))); + return std::result::Result::Err( + std::convert::Into::into(format!("invalid field count: {} vs {}", num_fields, fields.len()))); } #( - let mut #temp_vars = ::std::option::Option::None; + let mut #temp_vars = std::option::Option::None; )* for field in fields { - let oid = read_be_i32(&mut buf)? as u32; + let oid = postgres_types::private::read_be_i32(&mut buf)? as u32; if oid != field.type_().oid() { - return ::std::result::Result::Err(::std::convert::Into::into("unexpected OID")); + return std::result::Result::Err(std::convert::Into::into("unexpected OID")); } match field.name() { #( #field_names => { - #temp_vars = ::std::option::Option::Some( - read_value(field.type_(), &mut buf)?); + #temp_vars = std::option::Option::Some( + postgres_types::private::read_value(field.type_(), &mut buf)?); } )* _ => unreachable!(), } } - ::std::result::Result::Ok(#ident { + std::result::Result::Ok(#ident { #( #field_idents: #temp_vars.unwrap(), )* diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index dda59581c..0aeb21094 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -56,22 +56,22 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { let ident = &input.ident; let out = quote! 
{ - impl ::postgres_types::ToSql for #ident { + impl postgres_types::ToSql for #ident { fn to_sql(&self, - _type: &::postgres_types::Type, - buf: &mut ::std::vec::Vec) - -> ::std::result::Result<::postgres_types::IsNull, - ::std::boxed::Box<::std::error::Error + - ::std::marker::Sync + - ::std::marker::Send>> { + _type: &postgres_types::Type, + buf: &mut std::vec::Vec) + -> std::result::Result> { #to_sql_body } - fn accepts(type_: &::postgres_types::Type) -> bool { + fn accepts(type_: &postgres_types::Type) -> bool { #accepts_body } - ::postgres_types::to_sql_checked!(); + postgres_types::to_sql_checked!(); } }; @@ -91,18 +91,18 @@ fn enum_body(ident: &Ident, variants: &[Variant]) -> TokenStream { }; buf.extend_from_slice(s.as_bytes()); - ::std::result::Result::Ok(::postgres_types::IsNull::No) + std::result::Result::Ok(postgres_types::IsNull::No) } } fn domain_body() -> TokenStream { quote! { let type_ = match *_type.kind() { - ::postgres_types::Kind::Domain(ref type_) => type_, + postgres_types::Kind::Domain(ref type_) => type_, _ => unreachable!(), }; - ::postgres_types::ToSql::to_sql(&self.0, type_, buf) + postgres_types::ToSql::to_sql(&self.0, type_, buf) } } @@ -111,51 +111,40 @@ fn composite_body(fields: &[Field]) -> TokenStream { let field_idents = fields.iter().map(|f| &f.ident); quote! { - fn write_be_i32(buf: &mut W, n: i32) -> ::std::io::Result<()> - where W: ::std::io::Write - { - let be = [(n >> 24) as u8, (n >> 16) as u8, (n >> 8) as u8, n as u8]; - buf.write_all(&be) - } - let fields = match *_type.kind() { - ::postgres_types::Kind::Composite(ref fields) => fields, + postgres_types::Kind::Composite(ref fields) => fields, _ => unreachable!(), }; - write_be_i32(buf, fields.len() as i32)?; + buf.extend_from_slice(&(fields.len() as i32).to_be_bytes()); for field in fields { - write_be_i32(buf, field.type_().oid() as i32)?; + buf.extend_from_slice(&field.type_().oid().to_be_bytes()); let base = buf.len(); - write_be_i32(buf, 0)?; + buf.extend_from_slice(&[0; 4]); let r = match field.name() { #( - #field_names => { - ::postgres_types::ToSql::to_sql(&self.#field_idents, - field.type_(), - buf) - } + #field_names => postgres_types::ToSql::to_sql(&self.#field_idents, field.type_(), buf), )* _ => unreachable!(), }; let count = match r? 
{ - ::postgres_types::IsNull::Yes => -1, - ::postgres_types::IsNull::No => { + postgres_types::IsNull::Yes => -1, + postgres_types::IsNull::No => { let len = buf.len() - base - 4; if len > i32::max_value() as usize { - return ::std::result::Result::Err( - ::std::convert::Into::into("value too large to transmit")); + return std::result::Result::Err( + std::convert::Into::into("value too large to transmit")); } len as i32 } }; - write_be_i32(&mut &mut buf[base..base + 4], count)?; + buf[base..base + 4].copy_from_slice(&count.to_be_bytes()); } - ::std::result::Result::Ok(::postgres_types::IsNull::No) + std::result::Result::Ok(postgres_types::IsNull::No) } } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 3bb852d76..033390512 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -96,6 +96,9 @@ mod serde_json_1; #[cfg(feature = "with-uuid-0_7")] mod uuid_07; +#[cfg(feature = "derive")] +#[doc(hidden)] +pub mod private; mod special; mod type_gen; diff --git a/postgres-types/src/private.rs b/postgres-types/src/private.rs new file mode 100644 index 000000000..61ea3656f --- /dev/null +++ b/postgres-types/src/private.rs @@ -0,0 +1,33 @@ +use crate::{FromSql, Type}; +use std::error::Error; + +pub fn read_be_i32(buf: &mut &[u8]) -> Result> { + if buf.len() < 4 { + return Err("invalid buffer size".into()); + } + let mut bytes = [0; 4]; + bytes.copy_from_slice(&buf[..4]); + *buf = &buf[4..]; + Ok(i32::from_be_bytes(bytes)) +} + +pub fn read_value<'a, T>( + type_: &Type, + buf: &mut &'a [u8], +) -> Result> +where + T: FromSql<'a>, +{ + let len = read_be_i32(buf)?; + let value = if len < 0 { + None + } else { + if len as usize > buf.len() { + return Err("invalid buffer size".into()); + } + let (head, tail) = buf.split_at(len as usize); + *buf = tail; + Some(head) + }; + T::from_sql_nullable(type_, value) +} diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 53c8a138c..060904fe2 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -3,13 +3,12 @@ use crate::cancel_query; use crate::codec::BackendMessages; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; -use crate::simple_query::SimpleQueryStream; use crate::copy_out::CopyStream; use crate::query::RowStream; +use crate::simple_query::SimpleQueryStream; use crate::slice_iter; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; -use pin_utils::pin_mut; use crate::tls::TlsConnect; use crate::to_statement::ToStatement; use crate::types::{Oid, ToSql, Type}; @@ -25,6 +24,7 @@ use futures::channel::mpsc; use futures::{future, TryStream, TryStreamExt}; use futures::{ready, StreamExt}; use parking_lot::Mutex; +use pin_utils::pin_mut; use postgres_protocol::message::backend::Message; use std::collections::HashMap; use std::error; @@ -240,7 +240,7 @@ impl Client { params: &[&(dyn ToSql + Sync)], ) -> Result where - T: ?Sized + ToStatement + T: ?Sized + ToStatement, { let stream = self.query_raw(statement, slice_iter(params)).await?; pin_mut!(stream); @@ -387,10 +387,7 @@ impl Client { /// Prepared statements should be use for any query which contains user-specified data, as they provided the /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! 
- pub async fn simple_query( - &self, - query: &str, - ) -> Result, Error> { + pub async fn simple_query(&self, query: &str) -> Result, Error> { self.simple_query_raw(query).await?.try_collect().await } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 549b5148a..5452d1e7c 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -82,7 +82,8 @@ where } rows.as_mut().poll(cx) - }).await?; + }) + .await?; pin_mut!(rows); loop { diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 98e0f0e95..fb9154fb3 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -10,10 +10,7 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -pub async fn simple_query( - client: &InnerClient, - query: &str, -) -> Result { +pub async fn simple_query(client: &InnerClient, query: &str) -> Result { let buf = encode(query)?; let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index e8c2a6cf2..cda11e0e8 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -112,8 +112,8 @@ impl<'a> Transaction<'a> { statement: &T, params: &[&(dyn ToSql + Sync)], ) -> Result - where - T: ?Sized + ToStatement, + where + T: ?Sized + ToStatement, { self.client.query_one(statement, params).await } @@ -236,10 +236,7 @@ impl<'a> Transaction<'a> { } /// Like `Client::simple_query`. - pub async fn simple_query( - &self, - query: &str, - ) -> Result, Error> { + pub async fn simple_query(&self, query: &str) -> Result, Error> { self.client.simple_query(query).await } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 8c282c6e9..17e82dbe0 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -647,14 +647,30 @@ async fn check_send() { async fn query_one() { let client = connect("user=postgres").await; - client.batch_execute(" + client + .batch_execute( + " CREATE TEMPORARY TABLE foo ( name TEXT ); INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('carol'); - ").await.unwrap(); + ", + ) + .await + .unwrap(); - client.query_one("SELECT * FROM foo WHERE name = 'dave'", &[]).await.err().unwrap(); - client.query_one("SELECT * FROM foo WHERE name = 'alice'", &[]).await.unwrap(); - client.query_one("SELECT * FROM foo", &[]).await.err().unwrap(); + client + .query_one("SELECT * FROM foo WHERE name = 'dave'", &[]) + .await + .err() + .unwrap(); + client + .query_one("SELECT * FROM foo WHERE name = 'alice'", &[]) + .await + .unwrap(); + client + .query_one("SELECT * FROM foo", &[]) + .await + .err() + .unwrap(); } From e99d65ed9798a30f5954458cdfa5f7a4ec95fe97 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 10 Oct 2019 17:06:54 -0700 Subject: [PATCH 271/819] Derive documentation --- postgres-types/src/lib.rs | 103 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 033390512..3025df650 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -2,6 +2,109 @@ //! //! This crate is used by the `tokio-postgres` and `postgres` crates. You normally don't need to depend directly on it //! unless you want to define your own `ToSql` or `FromSql` definitions. +//! +//! # Derive +//! +//! 
If the `derive` cargo feature is enabled, you can derive `ToSql` and `FromSql` implementations for custom Postgres +//! types. +//! +//! ## Enums +//! +//! Postgres enums correspond to C-like enums in Rust: +//! +//! ```sql +//! CREATE TYPE "Mood" AS ENUM ( +//! 'Sad', +//! 'Ok', +//! 'Happy' +//! ); +//! ``` +//! +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! enum Mood { +//! Sad, +//! Ok, +//! Happy, +//! } +//! ``` +//! +//! ## Domains +//! +//! Postgres domains correspond to tuple structs with one member in Rust: +//! +//! ```sql +//! CREATE DOMAIN "SessionId" AS BYTEA CHECK(octet_length(VALUE) = 16); +//! ``` +//! +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! struct SessionId(Vec<u8>); +//! ``` +//! +//! ## Composites +//! +//! Postgres composite types correspond to structs in Rust: +//! +//! ```sql +//! CREATE TYPE "InventoryItem" AS ( +//! name TEXT, +//! supplier_id INT, +//! price DOUBLE PRECISION +//! ); +//! ``` +//! +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! struct InventoryItem { +//! name: String, +//! supplier_id: i32, +//! price: Option<f64>, +//! } +//! ``` +//! +//! ## Naming +//! +//! The derived implementations will enforce exact matches of type, field, and variant names between the Rust and +//! Postgres types. The `#[postgres(name = "...")]` attribute can be used to adjust the name on a type, variant, or +//! field: +//! +//! ```sql +//! CREATE TYPE mood AS ENUM ( +//! 'sad', +//! 'ok', +//! 'happy' +//! ); +//! ``` +//! +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! #[postgres(name = "mood")] +//! enum Mood { +//! #[postgres(name = "sad")] +//! Sad, +//! #[postgres(name = "ok")] +//! Ok, +//! #[postgres(name = "happy")] +//! Happy, +//! } +//! ``` #![warn(missing_docs)] use fallible_iterator::FallibleIterator; From ffd7245e54e8524881e9a9ebb5d6661bcd4c010c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 12 Oct 2019 16:30:27 -0700 Subject: [PATCH 272/819] Use BytesMut for messages Benchmarks indicate that malloc accounts for a significant amount of the runtime of queries. The message buffer accounts for ~half of that (the other being channels), and this change should eliminate it.
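An illustrative sketch of the buffer-reuse idea behind this change (hypothetical names, not code from the patch): each frontend message is serialized into one long-lived BytesMut and the finished frame is split off without copying, so the backing allocation is reused instead of a fresh Vec<u8> being allocated per message.

    // Sketch only: the BytesMut reuse pattern, assuming the `bytes` crate (0.5 or later).
    use bytes::{Bytes, BytesMut};

    fn encode_frames(messages: &[&str]) -> Vec<Bytes> {
        // One reusable buffer backs every message we serialize.
        let mut buf = BytesMut::with_capacity(8 * 1024);
        let mut frames = Vec::new();
        for msg in messages {
            // Write one message into the shared buffer...
            buf.extend_from_slice(msg.as_bytes());
            // ...then split the written bytes off cheaply; remaining capacity stays in `buf`.
            frames.push(buf.split().freeze());
        }
        frames
    }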
--- postgres-derive/src/tosql.rs | 2 +- postgres-protocol/src/lib.rs | 32 ++- postgres-protocol/src/message/frontend.rs | 335 +++++++--------------- postgres-protocol/src/types/mod.rs | 157 +++++----- postgres-types/Cargo.toml | 1 + postgres-types/src/bit_vec_06.rs | 3 +- postgres-types/src/chrono_04.rs | 13 +- postgres-types/src/eui48_04.rs | 3 +- postgres-types/src/geo_types_04.rs | 7 +- postgres-types/src/lib.rs | 70 +++-- postgres-types/src/private.rs | 1 + postgres-types/src/serde_json_1.rs | 13 +- postgres-types/src/special.rs | 13 +- postgres-types/src/uuid_07.rs | 3 +- tokio-postgres/src/bind.rs | 7 +- tokio-postgres/src/cancel_query_raw.rs | 3 +- tokio-postgres/src/client.rs | 11 +- tokio-postgres/src/codec.rs | 4 +- tokio-postgres/src/connect_raw.rs | 17 +- tokio-postgres/src/connect_tls.rs | 3 +- tokio-postgres/src/connection.rs | 5 +- tokio-postgres/src/copy_in.rs | 10 +- tokio-postgres/src/copy_out.rs | 4 +- tokio-postgres/src/portal.rs | 5 +- tokio-postgres/src/prepare.rs | 19 +- tokio-postgres/src/query.rs | 36 +-- tokio-postgres/src/simple_query.rs | 14 +- tokio-postgres/src/statement.rs | 5 +- tokio-postgres/src/transaction.rs | 5 +- tokio-postgres/tests/test/types/mod.rs | 3 +- 30 files changed, 390 insertions(+), 414 deletions(-) diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index 0aeb21094..a1c87b0ff 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -59,7 +59,7 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { impl postgres_types::ToSql for #ident { fn to_sql(&self, _type: &postgres_types::Type, - buf: &mut std::vec::Vec) + buf: &mut postgres_types::private::BytesMut) -> std::result::Result(serializer: F, buf: &mut Vec) -> Result<(), E> +// https://github.com/tokio-rs/bytes/issues/170 +struct B<'a>(&'a mut BytesMut); + +impl<'a> BufMut for B<'a> { + #[inline] + fn remaining_mut(&self) -> usize { + usize::max_value() - self.0.len() + } + + #[inline] + unsafe fn advance_mut(&mut self, cnt: usize) { + self.0.advance_mut(cnt); + } + + #[inline] + unsafe fn bytes_mut(&mut self) -> &mut [u8] { + if !self.0.has_remaining_mut() { + self.0.reserve(64); + } + + self.0.bytes_mut() + } +} + +fn write_nullable(serializer: F, buf: &mut BytesMut) -> Result<(), E> where - F: FnOnce(&mut Vec) -> Result, + F: FnOnce(&mut BytesMut) -> Result, E: From, { let base = buf.len(); - buf.extend_from_slice(&[0; 4]); + B(buf).put_i32_be(0); let size = match serializer(buf)? { IsNull::No => i32::from_usize(buf.len() - base - 4)?, IsNull::Yes => -1, diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index 6daed7d9d..697316ce8 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -1,157 +1,19 @@ //! Frontend message serialization. 
#![allow(missing_docs)] -use byteorder::{BigEndian, ByteOrder, WriteBytesExt}; +use byteorder::{BigEndian, ByteOrder}; use bytes::{Buf, BufMut, BytesMut, IntoBuf}; use std::convert::TryFrom; use std::error::Error; use std::io; use std::marker; -use crate::{write_nullable, FromUsize, IsNull, Oid}; - -pub enum Message<'a> { - Bind { - portal: &'a str, - statement: &'a str, - formats: &'a [i16], - values: &'a [Option>], - result_formats: &'a [i16], - }, - CancelRequest { - process_id: i32, - secret_key: i32, - }, - Close { - variant: u8, - name: &'a str, - }, - CopyData { - data: &'a [u8], - }, - CopyDone, - CopyFail { - message: &'a str, - }, - Describe { - variant: u8, - name: &'a str, - }, - Execute { - portal: &'a str, - max_rows: i32, - }, - Parse { - name: &'a str, - query: &'a str, - param_types: &'a [Oid], - }, - PasswordMessage { - password: &'a [u8], - }, - Query { - query: &'a str, - }, - SaslInitialResponse { - mechanism: &'a str, - data: &'a [u8], - }, - SaslResponse { - data: &'a [u8], - }, - SslRequest, - StartupMessage { - parameters: &'a [(String, String)], - }, - Sync, - Terminate, - #[doc(hidden)] - __ForExtensibility, -} - -impl<'a> Message<'a> { - #[inline] - pub fn serialize(&self, buf: &mut Vec) -> io::Result<()> { - match *self { - Message::Bind { - portal, - statement, - formats, - values, - result_formats, - } => { - let r = bind( - portal, - statement, - formats.iter().cloned(), - values, - |v, buf| match *v { - Some(ref v) => { - buf.extend_from_slice(v); - Ok(IsNull::No) - } - None => Ok(IsNull::Yes), - }, - result_formats.iter().cloned(), - buf, - ); - match r { - Ok(()) => Ok(()), - Err(BindError::Conversion(_)) => unreachable!(), - Err(BindError::Serialization(e)) => Err(e), - } - } - Message::CancelRequest { - process_id, - secret_key, - } => { - cancel_request(process_id, secret_key, buf); - Ok(()) - } - Message::Close { variant, name } => close(variant, name, buf), - Message::CopyData { data } => copy_data(data, buf), - Message::CopyDone => { - copy_done(buf); - Ok(()) - } - Message::CopyFail { message } => copy_fail(message, buf), - Message::Describe { variant, name } => describe(variant, name, buf), - Message::Execute { portal, max_rows } => execute(portal, max_rows, buf), - Message::Parse { - name, - query, - param_types, - } => parse(name, query, param_types.iter().cloned(), buf), - Message::PasswordMessage { password } => password_message(password, buf), - Message::Query { query: q } => query(q, buf), - Message::SaslInitialResponse { mechanism, data } => { - sasl_initial_response(mechanism, data, buf) - } - Message::SaslResponse { data } => sasl_response(data, buf), - Message::SslRequest => { - ssl_request(buf); - Ok(()) - } - Message::StartupMessage { parameters } => { - startup_message(parameters.iter().map(|&(ref k, ref v)| (&**k, &**v)), buf) - } - Message::Sync => { - sync(buf); - Ok(()) - } - Message::Terminate => { - terminate(buf); - Ok(()) - } - Message::__ForExtensibility => unreachable!(), - } - } -} +use crate::{write_nullable, FromUsize, IsNull, Oid, B}; #[inline] -fn write_body(buf: &mut Vec, f: F) -> Result<(), E> +fn write_body(buf: &mut BytesMut, f: F) -> Result<(), E> where - F: FnOnce(&mut Vec) -> Result<(), E>, + F: FnOnce(&mut BytesMut) -> Result<(), E>, E: From, { let base = buf.len(); @@ -191,36 +53,50 @@ pub fn bind( values: J, mut serializer: F, result_formats: K, - buf: &mut Vec, + buf: &mut BytesMut, ) -> Result<(), BindError> where I: IntoIterator, J: IntoIterator, - F: FnMut(T, &mut Vec) -> Result>, + F: FnMut(T, &mut 
BytesMut) -> Result>, K: IntoIterator, { - buf.push(b'B'); + B(buf).put_u8(b'B'); write_body(buf, |buf| { - buf.write_cstr(portal.as_bytes())?; - buf.write_cstr(statement.as_bytes())?; - write_counted(formats, |f, buf| buf.write_i16::(f), buf)?; + write_cstr(portal.as_bytes(), buf)?; + write_cstr(statement.as_bytes(), buf)?; + write_counted( + formats, + |f, buf| { + B(buf).put_i16_be(f); + Ok::<_, io::Error>(()) + }, + buf, + )?; write_counted( values, |v, buf| write_nullable(|buf| serializer(v, buf), buf), buf, )?; - write_counted(result_formats, |f, buf| buf.write_i16::(f), buf)?; + write_counted( + result_formats, + |f, buf| { + B(buf).put_i16_be(f); + Ok::<_, io::Error>(()) + }, + buf, + )?; Ok(()) }) } #[inline] -fn write_counted(items: I, mut serializer: F, buf: &mut Vec) -> Result<(), E> +fn write_counted(items: I, mut serializer: F, buf: &mut BytesMut) -> Result<(), E> where I: IntoIterator, - F: FnMut(T, &mut Vec) -> Result<(), E>, + F: FnMut(T, &mut BytesMut) -> Result<(), E>, E: From, { let base = buf.len(); @@ -237,31 +113,22 @@ where } #[inline] -pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut Vec) { +pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut BytesMut) { write_body(buf, |buf| { - buf.write_i32::(80_877_102).unwrap(); - buf.write_i32::(process_id).unwrap(); - buf.write_i32::(secret_key) + B(buf).put_i32_be(80_877_102); + B(buf).put_i32_be(process_id); + B(buf).put_i32_be(secret_key); + Ok::<_, io::Error>(()) }) .unwrap(); } #[inline] -pub fn close(variant: u8, name: &str, buf: &mut Vec) -> io::Result<()> { - buf.push(b'C'); +pub fn close(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> { + B(buf).put_u8(b'C'); write_body(buf, |buf| { - buf.push(variant); - buf.write_cstr(name.as_bytes()) - }) -} - -// FIXME ideally this'd take a Read but it's unclear what to do at EOF -#[inline] -pub fn copy_data(data: &[u8], buf: &mut Vec) -> io::Result<()> { - buf.push(b'd'); - write_body(buf, |buf| { - buf.extend_from_slice(data); - Ok(()) + B(buf).put_u8(variant); + write_cstr(name.as_bytes(), buf) }) } @@ -292,139 +159,143 @@ where } pub fn write(self, out: &mut BytesMut) { - out.reserve(self.len as usize + 1); - out.put_u8(b'd'); - out.put_i32_be(self.len); - out.put(self.buf); + B(out).put_u8(b'd'); + B(out).put_i32_be(self.len); + B(out).put(self.buf); } } #[inline] -pub fn copy_done(buf: &mut Vec) { - buf.push(b'c'); +pub fn copy_done(buf: &mut BytesMut) { + B(buf).put_u8(b'c'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } #[inline] -pub fn copy_fail(message: &str, buf: &mut Vec) -> io::Result<()> { - buf.push(b'f'); - write_body(buf, |buf| buf.write_cstr(message.as_bytes())) +pub fn copy_fail(message: &str, buf: &mut BytesMut) -> io::Result<()> { + B(buf).put_u8(b'f'); + write_body(buf, |buf| write_cstr(message.as_bytes(), buf)) } #[inline] -pub fn describe(variant: u8, name: &str, buf: &mut Vec) -> io::Result<()> { - buf.push(b'D'); +pub fn describe(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> { + B(buf).put_u8(b'D'); write_body(buf, |buf| { - buf.push(variant); - buf.write_cstr(name.as_bytes()) + B(buf).put_u8(variant); + write_cstr(name.as_bytes(), buf) }) } #[inline] -pub fn execute(portal: &str, max_rows: i32, buf: &mut Vec) -> io::Result<()> { - buf.push(b'E'); +pub fn execute(portal: &str, max_rows: i32, buf: &mut BytesMut) -> io::Result<()> { + B(buf).put_u8(b'E'); write_body(buf, |buf| { - buf.write_cstr(portal.as_bytes())?; - buf.write_i32::(max_rows).unwrap(); + 
write_cstr(portal.as_bytes(), buf)?; + B(buf).put_i32_be(max_rows); Ok(()) }) } #[inline] -pub fn parse(name: &str, query: &str, param_types: I, buf: &mut Vec) -> io::Result<()> +pub fn parse(name: &str, query: &str, param_types: I, buf: &mut BytesMut) -> io::Result<()> where I: IntoIterator, { - buf.push(b'P'); + B(buf).put_u8(b'P'); write_body(buf, |buf| { - buf.write_cstr(name.as_bytes())?; - buf.write_cstr(query.as_bytes())?; - write_counted(param_types, |t, buf| buf.write_u32::(t), buf)?; + write_cstr(name.as_bytes(), buf)?; + write_cstr(query.as_bytes(), buf)?; + write_counted( + param_types, + |t, buf| { + B(buf).put_u32_be(t); + Ok::<_, io::Error>(()) + }, + buf, + )?; Ok(()) }) } #[inline] -pub fn password_message(password: &[u8], buf: &mut Vec) -> io::Result<()> { - buf.push(b'p'); - write_body(buf, |buf| buf.write_cstr(password)) +pub fn password_message(password: &[u8], buf: &mut BytesMut) -> io::Result<()> { + B(buf).put_u8(b'p'); + write_body(buf, |buf| write_cstr(password, buf)) } #[inline] -pub fn query(query: &str, buf: &mut Vec) -> io::Result<()> { - buf.push(b'Q'); - write_body(buf, |buf| buf.write_cstr(query.as_bytes())) +pub fn query(query: &str, buf: &mut BytesMut) -> io::Result<()> { + B(buf).put_u8(b'Q'); + write_body(buf, |buf| write_cstr(query.as_bytes(), buf)) } #[inline] -pub fn sasl_initial_response(mechanism: &str, data: &[u8], buf: &mut Vec) -> io::Result<()> { - buf.push(b'p'); +pub fn sasl_initial_response(mechanism: &str, data: &[u8], buf: &mut BytesMut) -> io::Result<()> { + B(buf).put_u8(b'p'); write_body(buf, |buf| { - buf.write_cstr(mechanism.as_bytes())?; + write_cstr(mechanism.as_bytes(), buf)?; let len = i32::from_usize(data.len())?; - buf.write_i32::(len)?; - buf.extend_from_slice(data); + B(buf).put_i32_be(len); + B(buf).put_slice(data); Ok(()) }) } #[inline] -pub fn sasl_response(data: &[u8], buf: &mut Vec) -> io::Result<()> { - buf.push(b'p'); +pub fn sasl_response(data: &[u8], buf: &mut BytesMut) -> io::Result<()> { + B(buf).put_u8(b'p'); write_body(buf, |buf| { - buf.extend_from_slice(data); + B(buf).put_slice(data); Ok(()) }) } #[inline] -pub fn ssl_request(buf: &mut Vec) { - write_body(buf, |buf| buf.write_i32::(80_877_103)).unwrap(); +pub fn ssl_request(buf: &mut BytesMut) { + write_body(buf, |buf| { + B(buf).put_i32_be(80_877_103); + Ok::<_, io::Error>(()) + }) + .unwrap(); } #[inline] -pub fn startup_message<'a, I>(parameters: I, buf: &mut Vec) -> io::Result<()> +pub fn startup_message<'a, I>(parameters: I, buf: &mut BytesMut) -> io::Result<()> where I: IntoIterator, { write_body(buf, |buf| { - buf.write_i32::(196_608).unwrap(); + B(buf).put_i32_be(196_608); for (key, value) in parameters { - buf.write_cstr(key.as_bytes())?; - buf.write_cstr(value.as_bytes())?; + write_cstr(key.as_bytes(), buf)?; + write_cstr(value.as_bytes(), buf)?; } - buf.push(0); + B(buf).put_u8(0); Ok(()) }) } #[inline] -pub fn sync(buf: &mut Vec) { - buf.push(b'S'); +pub fn sync(buf: &mut BytesMut) { + B(buf).put_u8(b'S'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } #[inline] -pub fn terminate(buf: &mut Vec) { - buf.push(b'X'); +pub fn terminate(buf: &mut BytesMut) { + B(buf).put_u8(b'X'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } -trait WriteCStr { - fn write_cstr(&mut self, s: &[u8]) -> Result<(), io::Error>; -} - -impl WriteCStr for Vec { - #[inline] - fn write_cstr(&mut self, s: &[u8]) -> Result<(), io::Error> { - if s.contains(&0) { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "string contains embedded null", - )); - 
} - self.extend_from_slice(s); - self.push(0); - Ok(()) +#[inline] +fn write_cstr(s: &[u8], buf: &mut BytesMut) -> Result<(), io::Error> { + if s.contains(&0) { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "string contains embedded null", + )); } + B(buf).put_slice(s); + B(buf).put_u8(0); + Ok(()) } diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 0a93692ff..8af7486e7 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -1,5 +1,6 @@ //! Conversions to and from Postgres's binary format for various types. -use byteorder::{BigEndian, ByteOrder, ReadBytesExt, WriteBytesExt}; +use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; +use bytes::{BufMut, BytesMut}; use fallible_iterator::FallibleIterator; use std::boxed::Box as StdBox; use std::error::Error; @@ -7,7 +8,7 @@ use std::io::Read; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str; -use crate::{write_nullable, FromUsize, IsNull, Oid}; +use crate::{write_nullable, FromUsize, IsNull, Oid, B}; #[cfg(test)] mod test; @@ -23,8 +24,8 @@ const PGSQL_AF_INET6: u8 = 3; /// Serializes a `BOOL` value. #[inline] -pub fn bool_to_sql(v: bool, buf: &mut Vec) { - buf.push(v as u8); +pub fn bool_to_sql(v: bool, buf: &mut BytesMut) { + B(buf).put_u8(v as u8); } /// Deserializes a `BOOL` value. @@ -39,8 +40,8 @@ pub fn bool_from_sql(buf: &[u8]) -> Result /// Serializes a `BYTEA` value. #[inline] -pub fn bytea_to_sql(v: &[u8], buf: &mut Vec) { - buf.extend_from_slice(v); +pub fn bytea_to_sql(v: &[u8], buf: &mut BytesMut) { + B(buf).put_slice(v); } /// Deserializes a `BYTEA value. @@ -51,8 +52,8 @@ pub fn bytea_from_sql(buf: &[u8]) -> &[u8] { /// Serializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value. #[inline] -pub fn text_to_sql(v: &str, buf: &mut Vec) { - buf.extend_from_slice(v.as_bytes()); +pub fn text_to_sql(v: &str, buf: &mut BytesMut) { + B(buf).put_slice(v.as_bytes()); } /// Deserializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value. @@ -63,8 +64,8 @@ pub fn text_from_sql(buf: &[u8]) -> Result<&str, StdBox /// Serializes a `"char"` value. #[inline] -pub fn char_to_sql(v: i8, buf: &mut Vec) { - buf.write_i8(v).unwrap(); +pub fn char_to_sql(v: i8, buf: &mut BytesMut) { + B(buf).put_i8(v); } /// Deserializes a `"char"` value. @@ -79,8 +80,8 @@ pub fn char_from_sql(mut buf: &[u8]) -> Result) { - buf.write_i16::(v).unwrap(); +pub fn int2_to_sql(v: i16, buf: &mut BytesMut) { + B(buf).put_i16_be(v); } /// Deserializes an `INT2` value. @@ -95,8 +96,8 @@ pub fn int2_from_sql(mut buf: &[u8]) -> Result) { - buf.write_i32::(v).unwrap(); +pub fn int4_to_sql(v: i32, buf: &mut BytesMut) { + B(buf).put_i32_be(v); } /// Deserializes an `INT4` value. @@ -111,8 +112,8 @@ pub fn int4_from_sql(mut buf: &[u8]) -> Result) { - buf.write_u32::(v).unwrap(); +pub fn oid_to_sql(v: Oid, buf: &mut BytesMut) { + B(buf).put_u32_be(v); } /// Deserializes an `OID` value. @@ -127,8 +128,8 @@ pub fn oid_from_sql(mut buf: &[u8]) -> Result) { - buf.write_i64::(v).unwrap(); +pub fn int8_to_sql(v: i64, buf: &mut BytesMut) { + B(buf).put_i64_be(v); } /// Deserializes an `INT8` value. @@ -143,8 +144,8 @@ pub fn int8_from_sql(mut buf: &[u8]) -> Result) { - buf.write_f32::(v).unwrap(); +pub fn float4_to_sql(v: f32, buf: &mut BytesMut) { + B(buf).put_f32_be(v); } /// Deserializes a `FLOAT4` value. 
@@ -159,8 +160,8 @@ pub fn float4_from_sql(mut buf: &[u8]) -> Result) { - buf.write_f64::(v).unwrap(); +pub fn float8_to_sql(v: f64, buf: &mut BytesMut) { + B(buf).put_f64_be(v); } /// Deserializes a `FLOAT8` value. @@ -177,13 +178,13 @@ pub fn float8_from_sql(mut buf: &[u8]) -> Result( values: I, - buf: &mut Vec, + buf: &mut BytesMut, ) -> Result<(), StdBox> where I: IntoIterator)>, { let base = buf.len(); - buf.extend_from_slice(&[0; 4]); + B(buf).put_i32_be(0); let mut count = 0; for (key, value) in values { @@ -195,7 +196,7 @@ where Some(value) => { write_pascal_string(value, buf)?; } - None => buf.write_i32::(-1).unwrap(), + None => B(buf).put_i32_be(-1), } } @@ -205,10 +206,10 @@ where Ok(()) } -fn write_pascal_string(s: &str, buf: &mut Vec) -> Result<(), StdBox> { +fn write_pascal_string(s: &str, buf: &mut BytesMut) -> Result<(), StdBox> { let size = i32::from_usize(s.len())?; - buf.write_i32::(size).unwrap(); - buf.extend_from_slice(s.as_bytes()); + B(buf).put_i32_be(size); + B(buf).put_slice(s.as_bytes()); Ok(()) } @@ -285,16 +286,16 @@ impl<'a> FallibleIterator for HstoreEntries<'a> { pub fn varbit_to_sql( len: usize, v: I, - buf: &mut Vec, + buf: &mut BytesMut, ) -> Result<(), StdBox> where I: Iterator, { let len = i32::from_usize(len)?; - buf.write_i32::(len).unwrap(); + B(buf).put_i32_be(len); for byte in v { - buf.push(byte); + B(buf).put_u8(byte); } Ok(()) @@ -350,8 +351,8 @@ impl<'a> Varbit<'a> { /// /// The value should represent the number of microseconds since midnight, January 1st, 2000. #[inline] -pub fn timestamp_to_sql(v: i64, buf: &mut Vec) { - buf.write_i64::(v).unwrap(); +pub fn timestamp_to_sql(v: i64, buf: &mut BytesMut) { + B(buf).put_i64_be(v); } /// Deserializes a `TIMESTAMP` or `TIMESTAMPTZ` value. @@ -370,8 +371,8 @@ pub fn timestamp_from_sql(mut buf: &[u8]) -> Result) { - buf.write_i32::(v).unwrap(); +pub fn date_to_sql(v: i32, buf: &mut BytesMut) { + B(buf).put_i32_be(v); } /// Deserializes a `DATE` value. @@ -390,8 +391,8 @@ pub fn date_from_sql(mut buf: &[u8]) -> Result) { - buf.write_i64::(v).unwrap(); +pub fn time_to_sql(v: i64, buf: &mut BytesMut) { + B(buf).put_i64_be(v); } /// Deserializes a `TIME` or `TIMETZ` value. @@ -408,8 +409,8 @@ pub fn time_from_sql(mut buf: &[u8]) -> Result) { - buf.extend_from_slice(&v); +pub fn macaddr_to_sql(v: [u8; 6], buf: &mut BytesMut) { + B(buf).put_slice(&v); } /// Deserializes a `MACADDR` value. @@ -425,8 +426,8 @@ pub fn macaddr_from_sql(buf: &[u8]) -> Result<[u8; 6], StdBox) { - buf.extend_from_slice(&v); +pub fn uuid_to_sql(v: [u8; 16], buf: &mut BytesMut) { + B(buf).put_slice(&v); } /// Deserializes a `UUID` value. 
@@ -447,24 +448,24 @@ pub fn array_to_sql( element_type: Oid, elements: J, mut serializer: F, - buf: &mut Vec, + buf: &mut BytesMut, ) -> Result<(), StdBox> where I: IntoIterator, J: IntoIterator, - F: FnMut(T, &mut Vec) -> Result>, + F: FnMut(T, &mut BytesMut) -> Result>, { let dimensions_idx = buf.len(); - buf.extend_from_slice(&[0; 4]); + B(buf).put_i32_be(0); let flags_idx = buf.len(); - buf.extend_from_slice(&[0; 4]); - buf.write_u32::(element_type).unwrap(); + B(buf).put_i32_be(0); + B(buf).put_u32_be(element_type); let mut num_dimensions = 0; for dimension in dimensions { num_dimensions += 1; - buf.write_i32::(dimension.len).unwrap(); - buf.write_i32::(dimension.lower_bound).unwrap(); + B(buf).put_i32_be(dimension.len); + B(buf).put_i32_be(dimension.lower_bound); } let num_dimensions = i32::from_usize(num_dimensions)?; @@ -644,22 +645,22 @@ impl<'a> FallibleIterator for ArrayValues<'a> { /// Serializes an empty range. #[inline] -pub fn empty_range_to_sql(buf: &mut Vec) { - buf.push(RANGE_EMPTY); +pub fn empty_range_to_sql(buf: &mut BytesMut) { + B(buf).put_u8(RANGE_EMPTY); } /// Serializes a range value. pub fn range_to_sql( lower: F, upper: G, - buf: &mut Vec, + buf: &mut BytesMut, ) -> Result<(), StdBox> where - F: FnOnce(&mut Vec) -> Result, StdBox>, - G: FnOnce(&mut Vec) -> Result, StdBox>, + F: FnOnce(&mut BytesMut) -> Result, StdBox>, + G: FnOnce(&mut BytesMut) -> Result, StdBox>, { let tag_idx = buf.len(); - buf.push(0); + B(buf).put_u8(0); let mut tag = 0; match write_bound(lower, buf)? { @@ -681,13 +682,13 @@ where fn write_bound( bound: F, - buf: &mut Vec, + buf: &mut BytesMut, ) -> Result, StdBox> where - F: FnOnce(&mut Vec) -> Result, StdBox>, + F: FnOnce(&mut BytesMut) -> Result, StdBox>, { let base = buf.len(); - buf.extend_from_slice(&[0; 4]); + B(buf).put_i32_be(0); let (null, ret) = match bound(buf)? { RangeBound::Inclusive(null) => (Some(null), RangeBound::Inclusive(())), @@ -782,9 +783,9 @@ pub enum Range<'a> { /// Serializes a point value. #[inline] -pub fn point_to_sql(x: f64, y: f64, buf: &mut Vec) { - buf.write_f64::(x).unwrap(); - buf.write_f64::(y).unwrap(); +pub fn point_to_sql(x: f64, y: f64, buf: &mut BytesMut) { + B(buf).put_f64_be(x); + B(buf).put_f64_be(y); } /// Deserializes a point value. @@ -821,11 +822,11 @@ impl Point { /// Serializes a box value. #[inline] -pub fn box_to_sql(x1: f64, y1: f64, x2: f64, y2: f64, buf: &mut Vec) { - buf.write_f64::(x1).unwrap(); - buf.write_f64::(y1).unwrap(); - buf.write_f64::(x2).unwrap(); - buf.write_f64::(y2).unwrap(); +pub fn box_to_sql(x1: f64, y1: f64, x2: f64, y2: f64, buf: &mut BytesMut) { + B(buf).put_f64_be(x1); + B(buf).put_f64_be(y1); + B(buf).put_f64_be(x2); + B(buf).put_f64_be(y2); } /// Deserializes a box value. @@ -870,20 +871,20 @@ impl Box { pub fn path_to_sql( closed: bool, points: I, - buf: &mut Vec, + buf: &mut BytesMut, ) -> Result<(), StdBox> where I: IntoIterator, { - buf.push(closed as u8); + B(buf).put_u8(closed as u8); let points_idx = buf.len(); - buf.extend_from_slice(&[0; 4]); + B(buf).put_i32_be(0); let mut num_points = 0; for (x, y) in points { num_points += 1; - buf.write_f64::(x).unwrap(); - buf.write_f64::(y).unwrap(); + B(buf).put_f64_be(x); + B(buf).put_f64_be(y); } let num_points = i32::from_usize(num_points)?; @@ -964,22 +965,22 @@ impl<'a> FallibleIterator for PathPoints<'a> { /// Serializes a Postgres inet. 
#[inline] -pub fn inet_to_sql(addr: IpAddr, netmask: u8, buf: &mut Vec) { +pub fn inet_to_sql(addr: IpAddr, netmask: u8, buf: &mut BytesMut) { let family = match addr { IpAddr::V4(_) => PGSQL_AF_INET, IpAddr::V6(_) => PGSQL_AF_INET6, }; - buf.push(family); - buf.push(netmask); - buf.push(0); // is_cidr + B(buf).put_u8(family); + B(buf).put_u8(netmask); + B(buf).put_u8(0); // is_cidr match addr { IpAddr::V4(addr) => { - buf.push(4); - buf.extend_from_slice(&addr.octets()); + B(buf).put_u8(4); + B(buf).put_slice(&addr.octets()); } IpAddr::V6(addr) => { - buf.push(16); - buf.extend_from_slice(&addr.octets()); + B(buf).put_u8(16); + B(buf).put_slice(&addr.octets()); } } } diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 68621724b..d68a1ae86 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -14,6 +14,7 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] "with-uuid-0_7" = ["uuid-07"] [dependencies] +bytes = "0.4" fallible-iterator = "0.2" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } postgres-derive = { version = "0.3.3", optional = true, path = "../postgres-derive" } diff --git a/postgres-types/src/bit_vec_06.rs b/postgres-types/src/bit_vec_06.rs index 52fb6d1c1..322472c6f 100644 --- a/postgres-types/src/bit_vec_06.rs +++ b/postgres-types/src/bit_vec_06.rs @@ -1,4 +1,5 @@ use bit_vec_06::BitVec; +use bytes::BytesMut; use postgres_protocol::types; use std::error::Error; @@ -19,7 +20,7 @@ impl<'a> FromSql<'a> for BitVec { } impl ToSql for BitVec { - fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { types::varbit_to_sql(self.len(), self.to_bytes().into_iter(), out)?; Ok(IsNull::No) } diff --git a/postgres-types/src/chrono_04.rs b/postgres-types/src/chrono_04.rs index 8a3c8a222..9bfbb786f 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -1,3 +1,4 @@ +use bytes::BytesMut; use chrono_04::{DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc}; use postgres_protocol::types; use std::error::Error; @@ -18,7 +19,7 @@ impl<'a> FromSql<'a> for NaiveDateTime { } impl ToSql for NaiveDateTime { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { let time = match self.signed_duration_since(base()).num_microseconds() { Some(time) => time, None => return Err("value too large to transmit".into()), @@ -44,7 +45,7 @@ impl ToSql for DateTime { fn to_sql( &self, type_: &Type, - w: &mut Vec, + w: &mut BytesMut, ) -> Result> { self.naive_utc().to_sql(type_, w) } @@ -66,7 +67,7 @@ impl ToSql for DateTime { fn to_sql( &self, type_: &Type, - w: &mut Vec, + w: &mut BytesMut, ) -> Result> { self.with_timezone(&Utc).to_sql(type_, w) } @@ -91,7 +92,7 @@ impl ToSql for DateTime { fn to_sql( &self, type_: &Type, - w: &mut Vec, + w: &mut BytesMut, ) -> Result> { self.with_timezone(&Utc).to_sql(type_, w) } @@ -110,7 +111,7 @@ impl<'a> FromSql<'a> for NaiveDate { } impl ToSql for NaiveDate { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { let jd = self.signed_duration_since(base().date()).num_days(); if jd > i64::from(i32::max_value()) || jd < i64::from(i32::min_value()) { return Err("value too large to transmit".into()); @@ -134,7 +135,7 @@ impl<'a> FromSql<'a> for NaiveTime { } impl ToSql for NaiveTime { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut 
BytesMut) -> Result> { let delta = self.signed_duration_since(NaiveTime::from_hms(0, 0, 0)); let time = match delta.num_microseconds() { Some(time) => time, diff --git a/postgres-types/src/eui48_04.rs b/postgres-types/src/eui48_04.rs index 3e7422ee0..45df89a84 100644 --- a/postgres-types/src/eui48_04.rs +++ b/postgres-types/src/eui48_04.rs @@ -1,3 +1,4 @@ +use bytes::BytesMut; use eui48_04::MacAddress; use postgres_protocol::types; use std::error::Error; @@ -14,7 +15,7 @@ impl<'a> FromSql<'a> for MacAddress { } impl ToSql for MacAddress { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { let mut bytes = [0; 6]; bytes.copy_from_slice(self.as_bytes()); types::macaddr_to_sql(bytes, w); diff --git a/postgres-types/src/geo_types_04.rs b/postgres-types/src/geo_types_04.rs index f51273f21..eb8b958eb 100644 --- a/postgres-types/src/geo_types_04.rs +++ b/postgres-types/src/geo_types_04.rs @@ -1,3 +1,4 @@ +use bytes::BytesMut; use fallible_iterator::FallibleIterator; use geo_types_04::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; @@ -15,7 +16,7 @@ impl<'a> FromSql<'a> for Point { } impl ToSql for Point { - fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { types::point_to_sql(self.x(), self.y(), out); Ok(IsNull::No) } @@ -43,7 +44,7 @@ impl<'a> FromSql<'a> for Rect { } impl ToSql for Rect { - fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { types::box_to_sql(self.min.x, self.min.y, self.max.x, self.max.y, out); Ok(IsNull::No) } @@ -66,7 +67,7 @@ impl<'a> FromSql<'a> for LineString { } impl ToSql for LineString { - fn to_sql(&self, _: &Type, out: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { let closed = false; // always encode an open path from LineString types::path_to_sql(closed, self.0.iter().map(|p| (p.x, p.y)), out)?; Ok(IsNull::No) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 3025df650..b6d8f7421 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -131,6 +131,7 @@ use crate::type_gen::{Inner, Other}; pub use postgres_protocol::Oid; pub use crate::special::{Date, Timestamp}; +use bytes::{BufMut, BytesMut}; // Number of seconds from 1970-01-01 to 2000-01-01 const TIME_SEC_CONVERSION: u64 = 946_684_800; @@ -159,7 +160,7 @@ macro_rules! to_sql_checked { () => { fn to_sql_checked(&self, ty: &$crate::Type, - out: &mut ::std::vec::Vec) + out: &mut $crate::private::BytesMut) -> ::std::result::Result<$crate::IsNull, Box( v: &T, ty: &Type, - out: &mut Vec, + out: &mut BytesMut, ) -> Result> where T: ToSql, @@ -199,12 +200,35 @@ mod serde_json_1; #[cfg(feature = "with-uuid-0_7")] mod uuid_07; -#[cfg(feature = "derive")] #[doc(hidden)] pub mod private; mod special; mod type_gen; +// https://github.com/tokio-rs/bytes/issues/170 +struct B<'a>(&'a mut BytesMut); + +impl<'a> BufMut for B<'a> { + #[inline] + fn remaining_mut(&self) -> usize { + usize::max_value() - self.0.len() + } + + #[inline] + unsafe fn advance_mut(&mut self, cnt: usize) { + self.0.advance_mut(cnt); + } + + #[inline] + unsafe fn bytes_mut(&mut self) -> &mut [u8] { + if !self.0.has_remaining_mut() { + self.0.reserve(64); + } + + self.0.bytes_mut() + } +} + /// A Postgres type. 
#[derive(PartialEq, Eq, Clone, Debug)] pub struct Type(Inner); @@ -625,7 +649,7 @@ pub enum IsNull { /// | `f32` | REAL | /// | `f64` | DOUBLE PRECISION | /// | `&str`/`String` | VARCHAR, CHAR(n), TEXT, CITEXT, NAME | -/// | `&[u8]`/Vec` | BYTEA | +/// | `&[u8]`/`Vec` | BYTEA | /// | `HashMap>` | HSTORE | /// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | /// | `IpAddr` | INET | @@ -673,7 +697,7 @@ pub trait ToSql: fmt::Debug { /// The return value indicates if this value should be represented as /// `NULL`. If this is the case, implementations **must not** write /// anything to `out`. - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> + fn to_sql(&self, ty: &Type, out: &mut BytesMut) -> Result> where Self: Sized; @@ -690,7 +714,7 @@ pub trait ToSql: fmt::Debug { fn to_sql_checked( &self, ty: &Type, - out: &mut Vec, + out: &mut BytesMut, ) -> Result>; } @@ -698,7 +722,11 @@ impl<'a, T> ToSql for &'a T where T: ToSql, { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql( + &self, + ty: &Type, + out: &mut BytesMut, + ) -> Result> { (*self).to_sql(ty, out) } @@ -710,7 +738,11 @@ where } impl ToSql for Option { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql( + &self, + ty: &Type, + out: &mut BytesMut, + ) -> Result> { match *self { Some(ref val) => val.to_sql(ty, out), None => Ok(IsNull::Yes), @@ -725,7 +757,7 @@ impl ToSql for Option { } impl<'a, T: ToSql> ToSql for &'a [T] { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { let member_type = match *ty.kind() { Kind::Array(ref member) => member, _ => panic!("expected array type"), @@ -760,7 +792,7 @@ impl<'a, T: ToSql> ToSql for &'a [T] { } impl<'a> ToSql for &'a [u8] { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { types::bytea_to_sql(*self, w); Ok(IsNull::No) } @@ -771,7 +803,7 @@ impl<'a> ToSql for &'a [u8] { } impl ToSql for Vec { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[T] as ToSql>::to_sql(&&**self, ty, w) } @@ -783,7 +815,7 @@ impl ToSql for Vec { } impl ToSql for Vec { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[u8] as ToSql>::to_sql(&&**self, ty, w) } @@ -795,7 +827,7 @@ impl ToSql for Vec { } impl<'a> ToSql for &'a str { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { types::text_to_sql(*self, w); Ok(IsNull::No) } @@ -812,7 +844,7 @@ impl<'a> ToSql for &'a str { } impl<'a> ToSql for Cow<'a, str> { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&str as ToSql>::to_sql(&&self.as_ref(), ty, w) } @@ -824,7 +856,7 @@ impl<'a> ToSql for Cow<'a, str> { } impl ToSql for String { - fn to_sql(&self, ty: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&str as ToSql>::to_sql(&&**self, ty, w) } @@ -840,7 +872,7 @@ macro_rules! 
simple_to { impl ToSql for $t { fn to_sql(&self, _: &Type, - w: &mut Vec) + w: &mut BytesMut) -> Result> { types::$f(*self, w); Ok(IsNull::No) @@ -866,7 +898,7 @@ impl ToSql for HashMap, H> where H: BuildHasher, { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { types::hstore_to_sql( self.iter().map(|(k, v)| (&**k, v.as_ref().map(|v| &**v))), w, @@ -882,7 +914,7 @@ where } impl ToSql for SystemTime { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { let epoch = UNIX_EPOCH + Duration::from_secs(TIME_SEC_CONVERSION); let to_usec = @@ -903,7 +935,7 @@ impl ToSql for SystemTime { } impl ToSql for IpAddr { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { let netmask = match self { IpAddr::V4(_) => 32, IpAddr::V6(_) => 128, diff --git a/postgres-types/src/private.rs b/postgres-types/src/private.rs index 61ea3656f..774f9a301 100644 --- a/postgres-types/src/private.rs +++ b/postgres-types/src/private.rs @@ -1,4 +1,5 @@ use crate::{FromSql, Type}; +pub use bytes::BytesMut; use std::error::Error; pub fn read_be_i32(buf: &mut &[u8]) -> Result> { diff --git a/postgres-types/src/serde_json_1.rs b/postgres-types/src/serde_json_1.rs index 01616d32f..c6c35b001 100644 --- a/postgres-types/src/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -1,3 +1,4 @@ +use bytes::BytesMut; use serde_1::{Deserialize, Serialize}; use serde_json_1::Value; use std::error::Error; @@ -35,7 +36,11 @@ impl ToSql for Json where T: Serialize + Debug, { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql( + &self, + ty: &Type, + out: &mut BytesMut, + ) -> Result> { if *ty == Type::JSONB { out.push(1); } @@ -56,7 +61,11 @@ impl<'a> FromSql<'a> for Value { } impl ToSql for Value { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql( + &self, + ty: &Type, + out: &mut BytesMut, + ) -> Result> { Json(self).to_sql(ty, out) } diff --git a/postgres-types/src/special.rs b/postgres-types/src/special.rs index b1da02a74..5a2d7bc08 100644 --- a/postgres-types/src/special.rs +++ b/postgres-types/src/special.rs @@ -1,3 +1,4 @@ +use bytes::BytesMut; use postgres_protocol::types; use std::error::Error; use std::{i32, i64}; @@ -30,7 +31,11 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Date { } impl ToSql for Date { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql( + &self, + ty: &Type, + out: &mut BytesMut, + ) -> Result> { let value = match *self { Date::PosInfinity => i32::MAX, Date::NegInfinity => i32::MIN, @@ -78,7 +83,11 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Timestamp { } impl ToSql for Timestamp { - fn to_sql(&self, ty: &Type, out: &mut Vec) -> Result> { + fn to_sql( + &self, + ty: &Type, + out: &mut BytesMut, + ) -> Result> { let value = match *self { Timestamp::PosInfinity => i64::MAX, Timestamp::NegInfinity => i64::MIN, diff --git a/postgres-types/src/uuid_07.rs b/postgres-types/src/uuid_07.rs index 951992312..60f85a52a 100644 --- a/postgres-types/src/uuid_07.rs +++ b/postgres-types/src/uuid_07.rs @@ -1,3 +1,4 @@ +use bytes::BytesMut; use postgres_protocol::types; use std::error::Error; use uuid_07::Uuid; @@ -14,7 +15,7 @@ impl<'a> FromSql<'a> for Uuid { } impl ToSql for Uuid { - fn to_sql(&self, _: &Type, w: &mut Vec) -> Result> { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { types::uuid_to_sql(*self.as_bytes(), w); Ok(IsNull::No) } diff --git 
a/tokio-postgres/src/bind.rs b/tokio-postgres/src/bind.rs index 018663c42..eb9d1ba8d 100644 --- a/tokio-postgres/src/bind.rs +++ b/tokio-postgres/src/bind.rs @@ -20,8 +20,11 @@ where I::IntoIter: ExactSizeIterator, { let name = format!("p{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); - let mut buf = query::encode_bind(&statement, params, &name)?; - frontend::sync(&mut buf); + let buf = client.with_buf(|buf| { + query::encode_bind(&statement, params, &name, buf)?; + frontend::sync(buf); + Ok(buf.take().freeze()) + })?; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index d85b4281d..f00d32779 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -3,6 +3,7 @@ use crate::tls::TlsConnect; use crate::{connect_tls, Error}; use postgres_protocol::message::frontend; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use bytes::BytesMut; pub async fn cancel_query_raw( stream: S, @@ -17,7 +18,7 @@ where { let (mut stream, _) = connect_tls::connect_tls(stream, mode, tls).await?; - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::cancel_request(process_id, secret_key, &mut buf); stream.write_all(&buf).await.map_err(Error::io)?; diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 060904fe2..78c71e3a4 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -18,7 +18,7 @@ use crate::{cancel_query_raw, copy_in, copy_out, query, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; -use bytes::IntoBuf; +use bytes::{IntoBuf, BytesMut}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::{future, TryStream, TryStreamExt}; @@ -64,6 +64,7 @@ struct State { typeinfo_composite: Option, typeinfo_enum: Option, types: HashMap, + buf: BytesMut, } pub struct InnerClient { @@ -116,6 +117,13 @@ impl InnerClient { pub fn set_type(&self, oid: Oid, type_: &Type) { self.state.lock().types.insert(oid, type_.clone()); } + + pub fn with_buf(&self, f: F) -> R where F: FnOnce(&mut BytesMut) -> R { + let mut state = self.state.lock(); + let r = f(&mut state.buf); + state.buf.clear(); + r + } } #[derive(Clone)] @@ -155,6 +163,7 @@ impl Client { typeinfo_composite: None, typeinfo_enum: None, types: HashMap::new(), + buf: BytesMut::new(), }), }), #[cfg(feature = "runtime")] diff --git a/tokio-postgres/src/codec.rs b/tokio-postgres/src/codec.rs index 4e56459aa..f9f216bd9 100644 --- a/tokio-postgres/src/codec.rs +++ b/tokio-postgres/src/codec.rs @@ -1,4 +1,4 @@ -use bytes::{Buf, BytesMut}; +use bytes::{Buf, Bytes, BytesMut}; use fallible_iterator::FallibleIterator; use postgres_protocol::message::backend; use postgres_protocol::message::frontend::CopyData; @@ -6,7 +6,7 @@ use std::io; use tokio::codec::{Decoder, Encoder}; pub enum FrontendMessage { - Raw(Vec), + Raw(Bytes), CopyData(CopyData>), } diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index dbbc71a59..ee04bca60 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -18,6 +18,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use tokio::codec::Framed; use tokio::io::{AsyncRead, AsyncWrite}; +use bytes::BytesMut; pub struct StartupStream { inner: Framed, PostgresCodec>, @@ -122,11 +123,11 @@ where params.push(("application_name", &**application_name)); } - let mut 
buf = vec![]; + let mut buf = BytesMut::new(); frontend::startup_message(params, &mut buf).map_err(Error::encode)?; stream - .send(FrontendMessage::Raw(buf)) + .send(FrontendMessage::Raw(buf.freeze())) .await .map_err(Error::io) } @@ -212,11 +213,11 @@ where S: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin, { - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::password_message(password, &mut buf).map_err(Error::encode)?; stream - .send(FrontendMessage::Raw(buf)) + .send(FrontendMessage::Raw(buf.freeze())) .await .map_err(Error::io) } @@ -272,10 +273,10 @@ where let mut scram = ScramSha256::new(password, channel_binding); - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::sasl_initial_response(mechanism, scram.message(), &mut buf).map_err(Error::encode)?; stream - .send(FrontendMessage::Raw(buf)) + .send(FrontendMessage::Raw(buf.freeze())) .await .map_err(Error::io)?; @@ -290,10 +291,10 @@ where .update(body.data()) .map_err(|e| Error::authentication(e.into()))?; - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::sasl_response(scram.message(), &mut buf).map_err(Error::encode)?; stream - .send(FrontendMessage::Raw(buf)) + .send(FrontendMessage::Raw(buf.freeze())) .await .map_err(Error::io)?; diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs index 1bc35f584..9e4ac3ac9 100644 --- a/tokio-postgres/src/connect_tls.rs +++ b/tokio-postgres/src/connect_tls.rs @@ -5,6 +5,7 @@ use crate::tls::{ChannelBinding, TlsConnect}; use crate::Error; use postgres_protocol::message::frontend; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use bytes::BytesMut; pub async fn connect_tls( mut stream: S, @@ -24,7 +25,7 @@ where SslMode::__NonExhaustive => unreachable!(), } - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::ssl_request(&mut buf); stream.write_all(&buf).await.map_err(Error::io)?; diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index c3f90ef88..841f16d08 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -3,6 +3,7 @@ use crate::copy_in::CopyInReceiver; use crate::error::DbError; use crate::maybe_tls_stream::MaybeTlsStream; use crate::{AsyncMessage, Error, Notification}; +use bytes::BytesMut; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::stream::FusedStream; @@ -211,9 +212,9 @@ where Poll::Ready(None) if self.responses.is_empty() && self.state == State::Active => { trace!("poll_write: at eof, terminating"); self.state = State::Terminating; - let mut request = vec![]; + let mut request = BytesMut::new(); frontend::terminate(&mut request); - RequestMessages::Single(FrontendMessage::Raw(request)) + RequestMessages::Single(FrontendMessage::Raw(request.freeze())) } Poll::Ready(None) => { trace!( diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index 239f16663..ec9399290 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -46,17 +46,17 @@ impl Stream for CopyInReceiver { Some(CopyInMessage::Message(message)) => Poll::Ready(Some(message)), Some(CopyInMessage::Done) => { self.done = true; - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::copy_done(&mut buf); frontend::sync(&mut buf); - Poll::Ready(Some(FrontendMessage::Raw(buf))) + Poll::Ready(Some(FrontendMessage::Raw(buf.freeze()))) } None => { self.done = true; - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::copy_fail("", &mut 
buf).unwrap(); frontend::sync(&mut buf); - Poll::Ready(Some(FrontendMessage::Raw(buf))) + Poll::Ready(Some(FrontendMessage::Raw(buf.freeze()))) } } } @@ -76,7 +76,7 @@ where ::Buf: 'static + Send, S::Error: Into>, { - let buf = query::encode(&statement, params)?; + let buf = query::encode(client, &statement, params)?; let (mut sender, receiver) = mpsc::channel(1); let receiver = CopyInReceiver::new(receiver); diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 41bd87476..f5d52429d 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -18,12 +18,12 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = query::encode(&statement, params)?; + let buf = query::encode(client, &statement, params)?; let responses = start(client, buf).await?; Ok(CopyStream { responses }) } -async fn start(client: &InnerClient, buf: Vec) -> Result { +async fn start(client: &InnerClient, buf: Bytes) -> Result { let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; match responses.next().await? { diff --git a/tokio-postgres/src/portal.rs b/tokio-postgres/src/portal.rs index f2cb2ac3e..db4624a35 100644 --- a/tokio-postgres/src/portal.rs +++ b/tokio-postgres/src/portal.rs @@ -2,6 +2,7 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::Statement; +use bytes::BytesMut; use postgres_protocol::message::frontend; use std::sync::{Arc, Weak}; @@ -14,10 +15,10 @@ struct Inner { impl Drop for Inner { fn drop(&mut self) { if let Some(client) = self.client.upgrade() { - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::close(b'P', &self.name, &mut buf).expect("portal name not valid"); frontend::sync(&mut buf); - let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf))); + let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf.freeze()))); } } } diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 8f27156d4..a0d621545 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -14,6 +14,7 @@ use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use bytes::Bytes; const TYPEINFO_QUERY: &str = "\ SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid @@ -63,9 +64,7 @@ pub async fn prepare( types: &[Type], ) -> Result { let name = format!("s{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); - let buf = encode(&name, query, types); - - let buf = buf?; + let buf = encode(client, &name, query, types)?; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; match responses.next().await? 
{ @@ -112,13 +111,13 @@ fn prepare_rec<'a>( Box::pin(prepare(client, query, types)) } -fn encode(name: &str, query: &str, types: &[Type]) -> Result, Error> { - let mut buf = vec![]; - frontend::parse(name, query, types.iter().map(Type::oid), &mut buf).map_err(Error::encode)?; - frontend::describe(b'S', &name, &mut buf).map_err(Error::encode)?; - frontend::sync(&mut buf); - - Ok(buf) +fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Result { + client.with_buf(|buf| { + frontend::parse(name, query, types.iter().map(Type::oid), buf).map_err(Error::encode)?; + frontend::describe(b'S', &name, buf).map_err(Error::encode)?; + frontend::sync(buf); + Ok(buf.take().freeze()) + }) } async fn get_type(client: &Arc, oid: Oid) -> Result { diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 5686ab9fb..6d3d198cb 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -8,6 +8,7 @@ use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::pin::Pin; use std::task::{Context, Poll}; +use bytes::{Bytes, BytesMut}; pub async fn query<'a, I>( client: &InnerClient, @@ -18,7 +19,7 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = encode(&statement, params)?; + let buf = encode(client, &statement, params)?; let responses = start(client, buf).await?; Ok(RowStream { statement, @@ -31,9 +32,11 @@ pub async fn query_portal( portal: &Portal, max_rows: i32, ) -> Result { - let mut buf = vec![]; - frontend::execute(portal.name(), max_rows, &mut buf).map_err(Error::encode)?; - frontend::sync(&mut buf); + let buf = client.with_buf(|buf| { + frontend::execute(portal.name(), max_rows, buf).map_err(Error::encode)?; + frontend::sync(buf); + Ok(buf.take().freeze()) + })?; let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; @@ -52,7 +55,7 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = encode(&statement, params)?; + let buf = encode(client, &statement, params)?; let mut responses = start(client, buf).await?; loop { @@ -75,7 +78,7 @@ where } } -async fn start(client: &InnerClient, buf: Vec) -> Result { +async fn start(client: &InnerClient, buf: Bytes) -> Result { let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; match responses.next().await? 
{ @@ -86,19 +89,20 @@ async fn start(client: &InnerClient, buf: Vec) -> Result { Ok(responses) } -pub fn encode<'a, I>(statement: &Statement, params: I) -> Result, Error> +pub fn encode<'a, I>(client: &InnerClient, statement: &Statement, params: I) -> Result where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let mut buf = encode_bind(statement, params, "")?; - frontend::execute("", 0, &mut buf).map_err(Error::encode)?; - frontend::sync(&mut buf); - - Ok(buf) + client.with_buf(|buf| { + encode_bind(statement, params, "", buf)?; + frontend::execute("", 0, buf).map_err(Error::encode)?; + frontend::sync(buf); + Ok(buf.take().freeze()) + }) } -pub fn encode_bind<'a, I>(statement: &Statement, params: I, portal: &str) -> Result, Error> +pub fn encode_bind<'a, I>(statement: &Statement, params: I, portal: &str, buf: &mut BytesMut) -> Result<(), Error> where I: IntoIterator, I::IntoIter: ExactSizeIterator, @@ -112,8 +116,6 @@ where params.len() ); - let mut buf = vec![]; - let mut error_idx = 0; let r = frontend::bind( portal, @@ -129,10 +131,10 @@ where } }, Some(1), - &mut buf, + buf, ); match r { - Ok(()) => Ok(buf), + Ok(()) => Ok(()), Err(frontend::BindError::Conversion(e)) => Err(Error::to_sql(e, error_idx)), Err(frontend::BindError::Serialization(e)) => Err(Error::encode(e)), } diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index fb9154fb3..b9d9b65d6 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -9,9 +9,10 @@ use postgres_protocol::message::frontend; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; +use bytes::Bytes; pub async fn simple_query(client: &InnerClient, query: &str) -> Result { - let buf = encode(query)?; + let buf = encode(client, query)?; let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; Ok(SimpleQueryStream { @@ -21,7 +22,7 @@ pub async fn simple_query(client: &InnerClient, query: &str) -> Result Result<(), Error> { - let buf = encode(query)?; + let buf = encode(client, query)?; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; loop { @@ -36,10 +37,11 @@ pub async fn batch_execute(client: &InnerClient, query: &str) -> Result<(), Erro } } -fn encode(query: &str) -> Result, Error> { - let mut buf = vec![]; - frontend::query(query, &mut buf).map_err(Error::encode)?; - Ok(buf) +fn encode(client: &InnerClient, query: &str) -> Result { + client.with_buf(|buf| { + frontend::query(query, buf).map_err(Error::encode)?; + Ok(buf.take().freeze()) + }) } pub struct SimpleQueryStream { diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index 52028bb20..b8b04bc2f 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -2,6 +2,7 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::Type; +use bytes::BytesMut; use postgres_protocol::message::frontend; use std::sync::{Arc, Weak}; @@ -15,10 +16,10 @@ struct StatementInner { impl Drop for StatementInner { fn drop(&mut self) { if let Some(client) = self.client.upgrade() { - let mut buf = vec![]; + let mut buf = BytesMut::new(); frontend::close(b'S', &self.name, &mut buf).expect("statement name not valid"); frontend::sync(&mut buf); - let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf))); + let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf.freeze()))); } } } diff --git a/tokio-postgres/src/transaction.rs 
b/tokio-postgres/src/transaction.rs index cda11e0e8..b70311e4a 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -16,6 +16,7 @@ use futures::{TryStream, TryStreamExt}; use postgres_protocol::message::frontend; use std::error; use tokio::io::{AsyncRead, AsyncWrite}; +use postgres_types::private::BytesMut; /// A representation of a PostgreSQL database transaction. /// @@ -33,7 +34,7 @@ impl<'a> Drop for Transaction<'a> { return; } - let mut buf = vec![]; + let mut buf = BytesMut::new(); let query = if self.depth == 0 { "ROLLBACK".to_string() } else { @@ -43,7 +44,7 @@ impl<'a> Drop for Transaction<'a> { let _ = self .client .inner() - .send(RequestMessages::Single(FrontendMessage::Raw(buf))); + .send(RequestMessages::Single(FrontendMessage::Raw(buf.freeze()))); } } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index fefd1ed5b..3daabcd87 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -10,6 +10,7 @@ use std::time::{Duration, UNIX_EPOCH}; use tokio_postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, ToSql, Type, WrongType}; use crate::connect; +use bytes::BytesMut; #[cfg(feature = "with-bit-vec-0_6")] mod bit_vec_06; @@ -455,7 +456,7 @@ async fn domain() { fn to_sql( &self, ty: &Type, - out: &mut Vec, + out: &mut BytesMut, ) -> result::Result> { let inner = match *ty.kind() { Kind::Domain(ref inner) => inner, From 07363825932b4d5bae559c58ef23bbf7e4f987e2 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 12 Oct 2019 17:47:47 -0700 Subject: [PATCH 273/819] Fix tests --- postgres-protocol/src/types/test.rs | 23 ++++++++++++----------- tokio-postgres/src/cancel_query_raw.rs | 2 +- tokio-postgres/src/client.rs | 7 +++++-- tokio-postgres/src/connect_raw.rs | 2 +- tokio-postgres/src/connect_tls.rs | 2 +- tokio-postgres/src/prepare.rs | 2 +- tokio-postgres/src/query.rs | 9 +++++++-- tokio-postgres/src/simple_query.rs | 2 +- tokio-postgres/src/transaction.rs | 2 +- 9 files changed, 30 insertions(+), 21 deletions(-) diff --git a/postgres-protocol/src/types/test.rs b/postgres-protocol/src/types/test.rs index 7d2563555..8796ab31b 100644 --- a/postgres-protocol/src/types/test.rs +++ b/postgres-protocol/src/types/test.rs @@ -1,3 +1,4 @@ +use bytes::BytesMut; use fallible_iterator::FallibleIterator; use std::collections::HashMap; @@ -6,32 +7,32 @@ use crate::IsNull; #[test] fn bool() { - let mut buf = vec![]; + let mut buf = BytesMut::new(); bool_to_sql(true, &mut buf); assert_eq!(bool_from_sql(&buf).unwrap(), true); - let mut buf = vec![]; + let mut buf = BytesMut::new(); bool_to_sql(false, &mut buf); assert_eq!(bool_from_sql(&buf).unwrap(), false); } #[test] fn int2() { - let mut buf = vec![]; + let mut buf = BytesMut::new(); int2_to_sql(0x0102, &mut buf); assert_eq!(int2_from_sql(&buf).unwrap(), 0x0102); } #[test] fn int4() { - let mut buf = vec![]; + let mut buf = BytesMut::new(); int4_to_sql(0x0102_0304, &mut buf); assert_eq!(int4_from_sql(&buf).unwrap(), 0x0102_0304); } #[test] fn int8() { - let mut buf = vec![]; + let mut buf = BytesMut::new(); int8_to_sql(0x0102_0304_0506_0708, &mut buf); assert_eq!(int8_from_sql(&buf).unwrap(), 0x0102_0304_0506_0708); } @@ -39,7 +40,7 @@ fn int8() { #[test] #[allow(clippy::float_cmp)] fn float4() { - let mut buf = vec![]; + let mut buf = BytesMut::new(); float4_to_sql(10343.95, &mut buf); assert_eq!(float4_from_sql(&buf).unwrap(), 10343.95); } @@ -47,7 +48,7 @@ fn float4() { #[test] #[allow(clippy::float_cmp)] fn 
float8() { - let mut buf = vec![]; + let mut buf = BytesMut::new(); float8_to_sql(10343.95, &mut buf); assert_eq!(float8_from_sql(&buf).unwrap(), 10343.95); } @@ -58,7 +59,7 @@ fn hstore() { map.insert("hello", Some("world")); map.insert("hola", None); - let mut buf = vec![]; + let mut buf = BytesMut::new(); hstore_to_sql(map.iter().map(|(&k, &v)| (k, v)), &mut buf).unwrap(); assert_eq!( hstore_from_sql(&buf) @@ -74,7 +75,7 @@ fn varbit() { let len = 12; let bits = [0b0010_1011, 0b0000_1111]; - let mut buf = vec![]; + let mut buf = BytesMut::new(); varbit_to_sql(len, bits.iter().cloned(), &mut buf).unwrap(); let out = varbit_from_sql(&buf).unwrap(); assert_eq!(out.len(), len); @@ -95,7 +96,7 @@ fn array() { ]; let values = [None, Some(&b"hello"[..])]; - let mut buf = vec![]; + let mut buf = BytesMut::new(); array_to_sql( dimensions.iter().cloned(), 10, @@ -132,7 +133,7 @@ fn non_null_array() { ]; let values = [Some(&b"hola"[..]), Some(&b"hello"[..])]; - let mut buf = vec![]; + let mut buf = BytesMut::new(); array_to_sql( dimensions.iter().cloned(), 10, diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index f00d32779..0dcdd8ba9 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -1,9 +1,9 @@ use crate::config::SslMode; use crate::tls::TlsConnect; use crate::{connect_tls, Error}; +use bytes::BytesMut; use postgres_protocol::message::frontend; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use bytes::BytesMut; pub async fn cancel_query_raw( stream: S, diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 78c71e3a4..e0133bb3b 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -18,7 +18,7 @@ use crate::{cancel_query_raw, copy_in, copy_out, query, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; -use bytes::{IntoBuf, BytesMut}; +use bytes::{BytesMut, IntoBuf}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::{future, TryStream, TryStreamExt}; @@ -118,7 +118,10 @@ impl InnerClient { self.state.lock().types.insert(oid, type_.clone()); } - pub fn with_buf(&self, f: F) -> R where F: FnOnce(&mut BytesMut) -> R { + pub fn with_buf(&self, f: F) -> R + where + F: FnOnce(&mut BytesMut) -> R, + { let mut state = self.state.lock(); let r = f(&mut state.buf); state.buf.clear(); diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index ee04bca60..b96ced036 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -4,6 +4,7 @@ use crate::connect_tls::connect_tls; use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::{ChannelBinding, TlsConnect}; use crate::{Client, Connection, Error}; +use bytes::BytesMut; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::{ready, Sink, SinkExt, Stream, TryStreamExt}; @@ -18,7 +19,6 @@ use std::pin::Pin; use std::task::{Context, Poll}; use tokio::codec::Framed; use tokio::io::{AsyncRead, AsyncWrite}; -use bytes::BytesMut; pub struct StartupStream { inner: Framed, PostgresCodec>, diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs index 9e4ac3ac9..d03357b46 100644 --- a/tokio-postgres/src/connect_tls.rs +++ b/tokio-postgres/src/connect_tls.rs @@ -3,9 +3,9 @@ use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::private::ForcePrivateApi; use crate::tls::{ChannelBinding, TlsConnect}; use 
crate::Error; +use bytes::BytesMut; use postgres_protocol::message::frontend; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; -use bytes::BytesMut; pub async fn connect_tls( mut stream: S, diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index a0d621545..c00e9127b 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -5,6 +5,7 @@ use crate::error::SqlState; use crate::types::{Field, Kind, Oid, Type}; use crate::{query, slice_iter}; use crate::{Column, Error, Statement}; +use bytes::Bytes; use fallible_iterator::FallibleIterator; use futures::TryStreamExt; use pin_utils::pin_mut; @@ -14,7 +15,6 @@ use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; -use bytes::Bytes; const TYPEINFO_QUERY: &str = "\ SELECT t.typname, t.typtype, t.typelem, r.rngsubtype, t.typbasetype, n.nspname, t.typrelid diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 6d3d198cb..cc362eac6 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -3,12 +3,12 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::{IsNull, ToSql}; use crate::{Error, Portal, Row, Statement}; +use bytes::{Bytes, BytesMut}; use futures::{ready, Stream}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::pin::Pin; use std::task::{Context, Poll}; -use bytes::{Bytes, BytesMut}; pub async fn query<'a, I>( client: &InnerClient, @@ -102,7 +102,12 @@ where }) } -pub fn encode_bind<'a, I>(statement: &Statement, params: I, portal: &str, buf: &mut BytesMut) -> Result<(), Error> +pub fn encode_bind<'a, I>( + statement: &Statement, + params: I, + portal: &str, + buf: &mut BytesMut, +) -> Result<(), Error> where I: IntoIterator, I::IntoIter: ExactSizeIterator, diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index b9d9b65d6..11d04a1ac 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -2,6 +2,7 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; +use bytes::Bytes; use fallible_iterator::FallibleIterator; use futures::{ready, Stream}; use postgres_protocol::message::backend::Message; @@ -9,7 +10,6 @@ use postgres_protocol::message::frontend; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use bytes::Bytes; pub async fn simple_query(client: &InnerClient, query: &str) -> Result { let buf = encode(client, query)?; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index b70311e4a..686cd1ffa 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -14,9 +14,9 @@ use crate::{ use bytes::IntoBuf; use futures::{TryStream, TryStreamExt}; use postgres_protocol::message::frontend; +use postgres_types::private::BytesMut; use std::error; use tokio::io::{AsyncRead, AsyncWrite}; -use postgres_types::private::BytesMut; /// A representation of a PostgreSQL database transaction. 
/// From 20adbaf36b34705f01c5020e5c83b2becbf18422 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 12 Oct 2019 18:01:27 -0700 Subject: [PATCH 274/819] Fix build for real --- postgres-types/src/serde_json_1.rs | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/postgres-types/src/serde_json_1.rs b/postgres-types/src/serde_json_1.rs index c6c35b001..7b8cfe8fe 100644 --- a/postgres-types/src/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -1,4 +1,4 @@ -use bytes::BytesMut; +use bytes::{BufMut, BytesMut}; use serde_1::{Deserialize, Serialize}; use serde_json_1::Value; use std::error::Error; @@ -7,6 +7,30 @@ use std::io::Read; use crate::{FromSql, IsNull, ToSql, Type}; +// https://github.com/tokio-rs/bytes/issues/170 +struct B<'a>(&'a mut BytesMut); + +impl<'a> BufMut for B<'a> { + #[inline] + fn remaining_mut(&self) -> usize { + usize::max_value() - self.0.len() + } + + #[inline] + unsafe fn advance_mut(&mut self, cnt: usize) { + self.0.advance_mut(cnt); + } + + #[inline] + unsafe fn bytes_mut(&mut self) -> &mut [u8] { + if !self.0.has_remaining_mut() { + self.0.reserve(64); + } + + self.0.bytes_mut() + } +} + /// A wrapper type to allow arbitrary `Serialize`/`Deserialize` types to convert to Postgres JSON values. #[derive(Debug)] pub struct Json(pub T); @@ -42,9 +66,9 @@ where out: &mut BytesMut, ) -> Result> { if *ty == Type::JSONB { - out.push(1); + B(out).put_u8(1); } - serde_json_1::ser::to_writer(out, &self.0)?; + serde_json_1::ser::to_writer(B(out).writer(), &self.0)?; Ok(IsNull::No) } From 9e42c67b7bb2225fbb3a889d036d925b3b4d9d4f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 12 Oct 2019 18:07:09 -0700 Subject: [PATCH 275/819] Use the client buffer for cleanup --- tokio-postgres/src/portal.rs | 11 ++++++----- tokio-postgres/src/statement.rs | 11 ++++++----- tokio-postgres/src/transaction.rs | 9 +++++---- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/tokio-postgres/src/portal.rs b/tokio-postgres/src/portal.rs index db4624a35..ba8ab82a1 100644 --- a/tokio-postgres/src/portal.rs +++ b/tokio-postgres/src/portal.rs @@ -2,7 +2,6 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::Statement; -use bytes::BytesMut; use postgres_protocol::message::frontend; use std::sync::{Arc, Weak}; @@ -15,10 +14,12 @@ struct Inner { impl Drop for Inner { fn drop(&mut self) { if let Some(client) = self.client.upgrade() { - let mut buf = BytesMut::new(); - frontend::close(b'P', &self.name, &mut buf).expect("portal name not valid"); - frontend::sync(&mut buf); - let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf.freeze()))); + let buf = client.with_buf(|buf| { + frontend::close(b'P', &self.name, buf).unwrap(); + frontend::sync(buf); + buf.take().freeze() + }); + let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf))); } } } diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index b8b04bc2f..6c7ce6794 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -2,7 +2,6 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::Type; -use bytes::BytesMut; use postgres_protocol::message::frontend; use std::sync::{Arc, Weak}; @@ -16,10 +15,12 @@ struct StatementInner { impl Drop for StatementInner { fn drop(&mut self) { if let Some(client) = self.client.upgrade() { - let mut buf = BytesMut::new(); - 
frontend::close(b'S', &self.name, &mut buf).expect("statement name not valid"); - frontend::sync(&mut buf); - let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf.freeze()))); + let buf = client.with_buf(|buf| { + frontend::close(b'S', &self.name, buf).unwrap(); + frontend::sync(buf); + buf.take().freeze() + }); + let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf))); } } } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 686cd1ffa..de95862d4 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -14,7 +14,6 @@ use crate::{ use bytes::IntoBuf; use futures::{TryStream, TryStreamExt}; use postgres_protocol::message::frontend; -use postgres_types::private::BytesMut; use std::error; use tokio::io::{AsyncRead, AsyncWrite}; @@ -34,17 +33,19 @@ impl<'a> Drop for Transaction<'a> { return; } - let mut buf = BytesMut::new(); let query = if self.depth == 0 { "ROLLBACK".to_string() } else { format!("ROLLBACK TO sp{}", self.depth) }; - frontend::query(&query, &mut buf).unwrap(); + let buf = self.client.inner().with_buf(|buf| { + frontend::query(&query, buf).unwrap(); + buf.take().freeze() + }); let _ = self .client .inner() - .send(RequestMessages::Single(FrontendMessage::Raw(buf.freeze()))); + .send(RequestMessages::Single(FrontendMessage::Raw(buf))); } } From 270371a27a829ca357c31d6857d1fd33d5e1b4fb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 15:07:03 -0700 Subject: [PATCH 276/819] Remove some unused dependencies --- tokio-postgres/Cargo.toml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 8b88aab8c..c55799964 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -21,7 +21,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio/rt-full", "tokio/tcp", "tokio/uds", "tokio-executor", "lazy_static"] +runtime = ["tokio/rt-full", "tokio/tcp", "tokio/uds"] "with-bit-vec-0_6" = ["postgres-types/with-bit-vec-0_6"] "with-chrono-0_4" = ["postgres-types/with-chrono-0_4"] @@ -43,9 +43,6 @@ postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } postgres-types = { version = "0.1.0", path = "../postgres-types" } tokio = { version = "=0.2.0-alpha.6", default-features = false, features = ["io", "codec"] } -tokio-executor = { version = "=0.2.0-alpha.6", optional = true } -lazy_static = { version = "1.0", optional = true } - [dev-dependencies] tokio = "=0.2.0-alpha.6" env_logger = "0.5" From e5d2205ce10f049b50e5582d8604c168356a29e5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 15:13:27 -0700 Subject: [PATCH 277/819] Cut out unneeded quotes --- tokio-postgres/Cargo.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index c55799964..ba5e5aa65 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -23,12 +23,12 @@ circle-ci = { repository = "sfackler/rust-postgres" } default = ["runtime"] runtime = ["tokio/rt-full", "tokio/tcp", "tokio/uds"] -"with-bit-vec-0_6" = ["postgres-types/with-bit-vec-0_6"] -"with-chrono-0_4" = ["postgres-types/with-chrono-0_4"] -"with-eui48-0_4" = ["postgres-types/with-eui48-0_4"] -"with-geo-types-0_4" = ["postgres-types/with-geo-types-0_4"] +with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] +with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] +with-eui48-0_4 = 
["postgres-types/with-eui48-0_4"] +with-geo-types-0_4 = ["postgres-types/with-geo-types-0_4"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] -"with-uuid-0_7" = ["postgres-types/with-uuid-0_7"] +with-uuid-0_7 = ["postgres-types/with-uuid-0_7"] [dependencies] bytes = "0.4" From 7b73eee3fb5a257842bd3b30d0bb64b661776745 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 16:42:54 -0700 Subject: [PATCH 278/819] Make streams !Unpin Hedging against the future if we end up using an intrusive list implementation --- tokio-postgres/Cargo.toml | 1 + tokio-postgres/src/lib.rs | 2 ++ tokio-postgres/src/query.rs | 15 ++++++++++++--- tokio-postgres/src/simple_query.rs | 16 ++++++++++++---- 4 files changed, 27 insertions(+), 7 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index ba5e5aa65..fa04f42c7 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -38,6 +38,7 @@ log = "0.4" parking_lot = "0.9" percent-encoding = "1.0" pin-utils = "=0.1.0-alpha.4" +pin-project = "0.4" phf = "0.7.23" postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } postgres-types = { version = "0.1.0", path = "../postgres-types" } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 390369279..447955d30 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -114,6 +114,8 @@ pub use crate::row::{Row, SimpleQueryRow}; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::statement::{Column, Statement}; +pub use crate::query::RowStream; +pub use crate::simple_query::SimpleQueryStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index cc362eac6..9d3829c5f 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -1,4 +1,5 @@ use crate::client::{InnerClient, Responses}; +use pin_project::pin_project; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::{IsNull, ToSql}; @@ -9,6 +10,7 @@ use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::pin::Pin; use std::task::{Context, Poll}; +use std::marker::PhantomPinned; pub async fn query<'a, I>( client: &InnerClient, @@ -24,6 +26,7 @@ where Ok(RowStream { statement, responses, + _p: PhantomPinned, }) } @@ -43,6 +46,7 @@ pub async fn query_portal( Ok(RowStream { statement: portal.statement().clone(), responses, + _p: PhantomPinned, }) } @@ -145,18 +149,23 @@ where } } +/// A stream of table rows. +#[pin_project] pub struct RowStream { statement: Statement, responses: Responses, + #[pin] + _p: PhantomPinned, } impl Stream for RowStream { type Item = Result; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match ready!(self.responses.poll_next(cx)?) { + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + match ready!(this.responses.poll_next(cx)?) 
{ Message::DataRow(body) => { - Poll::Ready(Some(Ok(Row::new(self.statement.clone(), body)?))) + Poll::Ready(Some(Ok(Row::new(this.statement.clone(), body)?))) } Message::EmptyQueryResponse | Message::CommandComplete(_) diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 11d04a1ac..8f31cd372 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -10,6 +10,8 @@ use postgres_protocol::message::frontend; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; +use std::marker::PhantomPinned; +use pin_project::pin_project; pub async fn simple_query(client: &InnerClient, query: &str) -> Result { let buf = encode(client, query)?; @@ -18,6 +20,7 @@ pub async fn simple_query(client: &InnerClient, query: &str) -> Result Result { }) } +/// A stream of simple query results. +#[pin_project] pub struct SimpleQueryStream { responses: Responses, columns: Option>, + #[pin] + _p: PhantomPinned, } impl Stream for SimpleQueryStream { type Item = Result; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); loop { - match ready!(self.responses.poll_next(cx)?) { + match ready!(this.responses.poll_next(cx)?) { Message::CommandComplete(body) => { let rows = body .tag() @@ -76,10 +84,10 @@ impl Stream for SimpleQueryStream { .collect::>() .map_err(Error::parse)? .into(); - self.columns = Some(columns); + *this.columns = Some(columns); } Message::DataRow(body) => { - let row = match &self.columns { + let row = match &this.columns { Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, None => return Poll::Ready(Some(Err(Error::unexpected_message()))), }; From 9ebdca23e1d83d128687e83ef5b54052c67c98af Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 17:44:49 -0700 Subject: [PATCH 279/819] Release postgres-protocol v0.5.0-alpha.1 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- postgres-protocol/src/lib.rs | 2 +- postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 5 files changed, 10 insertions(+), 4 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 170e06f8e..1a2bd1ec2 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.0-alpha.1 - 2019-10-14 + +### Changed + +* Frontend messages and types now serialize to `BytesMut` rather than `Vec`. + ## v0.4.1 - 2019-06-29 ### Added diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index e4d8a5069..bb34c0a96 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.4.1" +version = "0.5.0-alpha.1" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index e7ac84d25..8a9f90e2f 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -9,7 +9,7 @@ //! //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. 
-#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.4")] +#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.5")] #![warn(missing_docs, rust_2018_idioms, clippy::all)] use byteorder::{BigEndian, ByteOrder}; diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index d68a1ae86..41e056285 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -16,7 +16,7 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] [dependencies] bytes = "0.4" fallible-iterator = "0.2" -postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } +postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } postgres-derive = { version = "0.3.3", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index fa04f42c7..98b160c1c 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -40,7 +40,7 @@ percent-encoding = "1.0" pin-utils = "=0.1.0-alpha.4" pin-project = "0.4" phf = "0.7.23" -postgres-protocol = { version = "0.4.1", path = "../postgres-protocol" } +postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } postgres-types = { version = "0.1.0", path = "../postgres-types" } tokio = { version = "=0.2.0-alpha.6", default-features = false, features = ["io", "codec"] } From 94115a81b47c7ea767995509ab4dd93563281817 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 17:50:04 -0700 Subject: [PATCH 280/819] Release postgres-derive v0.4.0-alpha.1 --- postgres-derive/Cargo.toml | 6 +- postgres-derive/LICENSE-APACHE | 201 +++++++++++++++++++++++++++++++++ postgres-derive/LICENSE-MIT | 22 ++++ postgres-derive/src/lib.rs | 2 + postgres-types/Cargo.toml | 2 +- 5 files changed, 228 insertions(+), 5 deletions(-) create mode 100644 postgres-derive/LICENSE-APACHE create mode 100644 postgres-derive/LICENSE-MIT diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 9121e8ee4..0a9c2e678 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,13 +1,11 @@ [package] name = "postgres-derive" -version = "0.3.3" +version = "0.4.0-alpha.1" authors = ["Steven Fackler "] license = "MIT/Apache-2.0" edition = "2018" -description = "Deriving plugin support for Postgres enum, domain, and composite types" +description = "An internal crate used by postgres-types" repository = "https://github.com/sfackler/rust-postgres" -readme = "README.md" -keywords = ["database", "postgres", "postgresql", "sql"] [lib] proc-macro = true diff --git a/postgres-derive/LICENSE-APACHE b/postgres-derive/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/postgres-derive/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/postgres-derive/LICENSE-MIT b/postgres-derive/LICENSE-MIT new file mode 100644 index 000000000..71803aea1 --- /dev/null +++ b/postgres-derive/LICENSE-MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Steven Fackler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/postgres-derive/src/lib.rs b/postgres-derive/src/lib.rs index 9ca8ec20b..fd17b9de6 100644 --- a/postgres-derive/src/lib.rs +++ b/postgres-derive/src/lib.rs @@ -1,3 +1,5 @@ +//! An internal crate for `postgres-types`. + #![recursion_limit = "256"] extern crate proc_macro; diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 41e056285..ffae62e6b 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -17,7 +17,7 @@ with-serde_json-1 = ["serde-1", "serde_json-1"] bytes = "0.4" fallible-iterator = "0.2" postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } -postgres-derive = { version = "0.3.3", optional = true, path = "../postgres-derive" } +postgres-derive = { version = "=0.4.0-alpha.1", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } From 4745cda7b7f7a0837072c44d6c03f91b7d9eccd3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 17:56:10 -0700 Subject: [PATCH 281/819] Release postgres-types v0.1.0-alpha.1 --- postgres-derive/CHANGELOG.md | 5 + postgres-types/Cargo.toml | 20 ++-- postgres-types/LICENSE-APACHE | 201 ++++++++++++++++++++++++++++++++++ postgres-types/LICENSE-MIT | 22 ++++ postgres-types/src/lib.rs | 3 +- tokio-postgres/Cargo.toml | 2 +- 6 files changed, 244 insertions(+), 9 deletions(-) create mode 100644 postgres-derive/CHANGELOG.md create mode 100644 postgres-types/LICENSE-APACHE create mode 100644 postgres-types/LICENSE-MIT diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md new file mode 100644 index 000000000..79de77480 --- /dev/null +++ b/postgres-derive/CHANGELOG.md @@ -0,0 +1,5 @@ +# Change Log + +## v0.1.0-alpha.1 - 2019-10-14 + +* Initial release diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index ffae62e6b..6026ea6f2 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,17 +1,23 @@ [package] name = "postgres-types" -version = "0.1.0" +version = "0.1.0-alpha.1" authors = ["Steven Fackler "] edition = "2018" +license = "MIT/Apache-2.0" +description = "Conversions between Rust and Postgres values" +repository = 
"https://github.com/sfackler/rust-postgres" +readme = "../README.md" +keywords = ["database", "postgres", "postgresql", "sql"] +categories = ["database"] [features] -"derive" = ["postgres-derive"] -"with-bit-vec-0_6" = ["bit-vec-06"] -"with-chrono-0_4" = ["chrono-04"] -"with-eui48-0_4" = ["eui48-04"] -"with-geo-types-0_4" = ["geo-types-04"] +derive = ["postgres-derive"] +with-bit-vec-0_6 = ["bit-vec-06"] +with-chrono-0_4 = ["chrono-04"] +with-eui48-0_4 = ["eui48-04"] +with-geo-types-0_4 = ["geo-types-04"] with-serde_json-1 = ["serde-1", "serde_json-1"] -"with-uuid-0_7" = ["uuid-07"] +with-uuid-0_7 = ["uuid-07"] [dependencies] bytes = "0.4" diff --git a/postgres-types/LICENSE-APACHE b/postgres-types/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/postgres-types/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/postgres-types/LICENSE-MIT b/postgres-types/LICENSE-MIT new file mode 100644 index 000000000..71803aea1 --- /dev/null +++ b/postgres-types/LICENSE-MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Steven Fackler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index b6d8f7421..264442b3f 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -105,7 +105,8 @@ //! Happy, //! } //! 
``` -#![warn(missing_docs)] +#![doc(html_root_url = "https://docs.rs/postgres-types/0.1")] +#![warn(clippy::all, rust_2018_idioms, missing_docs)] use fallible_iterator::FallibleIterator; use postgres_protocol; diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 98b160c1c..b2a2d9c67 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -41,7 +41,7 @@ pin-utils = "=0.1.0-alpha.4" pin-project = "0.4" phf = "0.7.23" postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } -postgres-types = { version = "0.1.0", path = "../postgres-types" } +postgres-types = { version = "=0.1.0-alpha.1", path = "../postgres-types" } tokio = { version = "=0.2.0-alpha.6", default-features = false, features = ["io", "codec"] } [dev-dependencies] From afa466fb256727464c3a2b55cbfe5c971f628d32 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 18:14:11 -0700 Subject: [PATCH 282/819] Release tokio-postgres v0.5.0-alpha.1 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/Cargo.toml | 2 +- tokio-postgres/CHANGELOG.md | 20 ++++++++++++++++++++ tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/lib.rs | 2 +- 6 files changed, 25 insertions(+), 5 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index c56b0831c..1b8e18895 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -20,7 +20,7 @@ futures-preview = "=0.3.0-alpha.19" native-tls = "0.2" tokio-io = "=0.2.0-alpha.6" tokio-tls = "=0.3.0-alpha.6" -tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = "=0.2.0-alpha.6" diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 60be19c5b..faddd951b 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -20,7 +20,7 @@ futures-preview = "=0.3.0-alpha.19" openssl = "0.10" tokio-io = "=0.2.0-alpha.6" tokio-openssl = "=0.4.0-alpha.6" -tokio-postgres = { version = "0.4.0-rc.1", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = "=0.2.0-alpha.6" diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index d07664eba..5588b9e95 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -32,7 +32,7 @@ bytes = "0.4" fallible-iterator = "0.2" futures-preview = "=0.3.0-alpha.19" pin-utils = "=0.1.0-alpha.4" -tokio-postgres = { version = "0.4.0-rc.2", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } tokio-executor = "=0.2.0-alpha.6" tokio = { version = "=0.2.0-alpha.6", optional = true } diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index d55f7f9af..7ed4b4dec 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,25 @@ # Change Log +## v0.5.0-alpha.1 - 2019-10-14 + +### Changed + +* The library now uses `std::futures::Future` and async/await syntax. +* Most methods now take `&self` rather than `&mut self`. +* The transaction API has changed to more closely resemble the synchronous API and is significantly more ergonomic. +* Methods now take `&[&(dyn ToSql + Sync)]` rather than `&[&dyn ToSql]` to allow futures to be `Send`. 
+* Methods are now "normal" async functions that no longer do work up-front. +* Statements are no longer required to be prepared explicitly before use. Methods taking `&Statement` can now also take + `&str`, and will internally prepare the statement. +* `ToSql` now serializes its value into a `BytesMut` rather than `Vec`. +* Methods that previously returned `Stream`s now return `Vec`. New `*_raw` methods still provide a `Stream` + interface. + +### Added + +* Added the `channel_binding=disable/allow/require` configuration to control use of channel binding. +* Added the `Client::query_one` method to cover the common case of a query that returns exactly one row. + ## v0.4.0-rc.3 - 2019-06-29 ### Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index b2a2d9c67..5e685c462 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.4.0-rc.3" +version = "0.5.0-alpha.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 447955d30..ebffbb90a 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -101,7 +101,7 @@ //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the //! `postgres-openssl` and `postgres-native-tls` crates provide implementations backed by the `openssl` and `native-tls` //! crates, respectively. -#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.4.0-rc.3")] +#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.5")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] pub use crate::client::Client; From ae230fb012b9bb8b6408a52cb6c31b207209147c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 18:17:00 -0700 Subject: [PATCH 283/819] Release tokio-native-tls v0.3.0-alpha.1 --- postgres-native-tls/CHANGELOG.md | 8 ++++++++ postgres-native-tls/Cargo.toml | 2 +- postgres-native-tls/src/lib.rs | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index 46fab3005..5a8a37ec7 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -1,5 +1,13 @@ # Change Log +## v0.3.0-alpha.1 - 2019-10-14 + +### Changed + +* Updated to tokio-postgres v0.5.0-alpha.1. + ## v0.2.0-rc.1 - 2019-06-29 +### Changed + * Updated to tokio-postgres v0.4.0-rc. diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 1b8e18895..8759505b1 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-native-tls" -version = "0.2.0-rc.1" +version = "0.3.0-alpha.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index b231586ac..f9e67fa0d 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -45,7 +45,7 @@ //! # Ok(()) //! # } //! 
``` -#![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.2.0-rc.1")] +#![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] use std::future::Future; From 7f7ef7560a4f7a2400c02a3b0d9079f86521b964 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 18:20:28 -0700 Subject: [PATCH 284/819] Release postgres-openssl v0.3.0-alpha.1 --- postgres-openssl/CHANGELOG.md | 8 ++++++++ postgres-openssl/Cargo.toml | 4 ++-- postgres-openssl/src/lib.rs | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md index 90d09aad0..683902404 100644 --- a/postgres-openssl/CHANGELOG.md +++ b/postgres-openssl/CHANGELOG.md @@ -1,5 +1,13 @@ # Change Log +## v0.3.0-alpha.1 - 2019-10-14 + +### Changed + +* Updated to `tokio-postgres` v0.5.0-alpha.1 + ## v0.2.0-rc.1 - 2019-03-06 +### Changed + * Updated to `tokio-postgres` v0.4.0-rc. diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index faddd951b..9316b60c9 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-openssl" -version = "0.2.0-rc.1" +version = "0.3.0-alpha.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -20,7 +20,7 @@ futures-preview = "=0.3.0-alpha.19" openssl = "0.10" tokio-io = "=0.2.0-alpha.6" tokio-openssl = "=0.4.0-alpha.6" -tokio-postgres = { version = "0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = "=0.2.0-alpha.6" diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index ff7c3d7aa..2be536d28 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -39,7 +39,7 @@ //! # Ok(()) //! # } //! 
``` -#![doc(html_root_url = "https://docs.rs/postgres-openssl/0.2.0-rc.1")] +#![doc(html_root_url = "https://docs.rs/postgres-openssl/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] #[cfg(feature = "runtime")] From 2a8f7bdd96221a53357490995074797a738599e2 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 14 Oct 2019 18:24:04 -0700 Subject: [PATCH 285/819] Release postgres v0.17.0-alpha.1 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/CHANGELOG.md | 6 ++++++ postgres/Cargo.toml | 14 +++++++------- postgres/src/lib.rs | 2 +- 5 files changed, 16 insertions(+), 10 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 8759505b1..e9ab95f08 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", defau [dev-dependencies] tokio = "=0.2.0-alpha.6" -postgres = { version = "0.16.0-rc.1", path = "../postgres" } +postgres = { version = "=0.17.0-alpha.1", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 9316b60c9..1e9787c8e 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", defau [dev-dependencies] tokio = "=0.2.0-alpha.6" -postgres = { version = "0.16.0-rc.1", path = "../postgres" } +postgres = { version = "=0.17.0-alpha.1", path = "../postgres" } diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 151251ef0..9cd1801b1 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.17.0-alpha.1 - 2019-10-14 + +### Changed + +* Updated `tokio-postgres` to 0.5.0-alpha.1. + ## v0.16.0-rc.2 - 2019-06-29 ### Fixed diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 5588b9e95..e75ed4494 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.16.0-rc.2" +version = "0.17.0-alpha.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -20,12 +20,12 @@ circle-ci = { repository = "sfackler/rust-postgres" } default = ["runtime"] runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] -"with-bit-vec-0_6" = ["tokio-postgres/with-bit-vec-0_6"] -"with-chrono-0_4" = ["tokio-postgres/with-chrono-0_4"] -"with-eui48-0_4" = ["tokio-postgres/with-eui48-0_4"] -"with-geo-types-0_4" = ["tokio-postgres/with-geo-types-0_4"] -"with-serde_json-1" = ["tokio-postgres/with-serde_json-1"] -"with-uuid-0_7" = ["tokio-postgres/with-uuid-0_7"] +with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] +with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] +with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] +with-geo-types-0_4 = ["tokio-postgres/with-geo-types-0_4"] +with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] +with-uuid-0_7 = ["tokio-postgres/with-uuid-0_7"] [dependencies] bytes = "0.4" diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index ae52f2a34..b8e5a76b8 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -52,7 +52,7 @@ //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the //! `postgres-openssl` and `postgres-native-tls` crates provide implementations backed by the `openssl` and `native-tls` //! crates, respectively. 
-#![doc(html_root_url = "https://docs.rs/postgres/0.16.0-rc.2")] +#![doc(html_root_url = "https://docs.rs/postgres/0.17")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] #[cfg(feature = "runtime")] From 9d2ec747ef4b86c00df3bf4dd6b4b6c47d4e96a5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 15 Oct 2019 18:17:10 -0700 Subject: [PATCH 286/819] Add some simple benchmarks --- tokio-postgres/Cargo.toml | 5 +++ tokio-postgres/benches/bench.rs | 61 ++++++++++++++++++++++++++++++ tokio-postgres/src/lib.rs | 4 +- tokio-postgres/src/query.rs | 4 +- tokio-postgres/src/simple_query.rs | 4 +- 5 files changed, 72 insertions(+), 6 deletions(-) create mode 100644 tokio-postgres/benches/bench.rs diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 5e685c462..0cd04bd0e 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -13,6 +13,10 @@ categories = ["database"] [lib] test = false +[[bench]] +name = "bench" +harness = false + [package.metadata.docs.rs] all-features = true @@ -47,6 +51,7 @@ tokio = { version = "=0.2.0-alpha.6", default-features = false, features = ["io" [dev-dependencies] tokio = "=0.2.0-alpha.6" env_logger = "0.5" +criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono" } diff --git a/tokio-postgres/benches/bench.rs b/tokio-postgres/benches/bench.rs new file mode 100644 index 000000000..4369242fc --- /dev/null +++ b/tokio-postgres/benches/bench.rs @@ -0,0 +1,61 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use futures::channel::oneshot; +use futures::executor; +use std::sync::Arc; +use std::time::Instant; +use tokio::runtime::Runtime; +use tokio_postgres::{Client, NoTls}; + +fn setup() -> (Client, Runtime) { + let runtime = Runtime::new().unwrap(); + let (client, conn) = runtime + .block_on(tokio_postgres::connect( + "host=localhost port=5433 user=postgres", + NoTls, + )) + .unwrap(); + runtime.spawn(async { conn.await.unwrap() }); + (client, runtime) +} + +fn query_prepared(c: &mut Criterion) { + let (client, runtime) = setup(); + let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); + c.bench_function("runtime_block_on", move |b| { + b.iter(|| { + runtime + .block_on(client.query(&statement, &[&1i64])) + .unwrap() + }) + }); + + let (client, runtime) = setup(); + let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); + c.bench_function("executor_block_on", move |b| { + b.iter(|| { + executor::block_on(client.query(&statement, &[&1i64])).unwrap() + }) + }); + + let (client, runtime) = setup(); + let client = Arc::new(client); + let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); + c.bench_function("spawned", move |b| { + b.iter_custom(|iters| { + let (tx, rx) = oneshot::channel(); + let client = client.clone(); + let statement = statement.clone(); + runtime.spawn(async move { + let start = Instant::now(); + for _ in 0..iters { + client.query(&statement, &[&1i64]).await.unwrap(); + } + tx.send(start.elapsed()).unwrap(); + }); + executor::block_on(rx).unwrap() + }) + }); +} + +criterion_group!(benches, query_prepared); +criterion_main!(benches); diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index ebffbb90a..3124d2bf3 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -110,12 +110,12 @@ pub use crate::connection::Connection; use crate::error::DbError; pub use crate::error::Error; pub use crate::portal::Portal; +pub use 
crate::query::RowStream; pub use crate::row::{Row, SimpleQueryRow}; +pub use crate::simple_query::SimpleQueryStream; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::statement::{Column, Statement}; -pub use crate::query::RowStream; -pub use crate::simple_query::SimpleQueryStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 9d3829c5f..5a1b7b491 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -1,16 +1,16 @@ use crate::client::{InnerClient, Responses}; -use pin_project::pin_project; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::{IsNull, ToSql}; use crate::{Error, Portal, Row, Statement}; use bytes::{Bytes, BytesMut}; use futures::{ready, Stream}; +use pin_project::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; +use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; -use std::marker::PhantomPinned; pub async fn query<'a, I>( client: &InnerClient, diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 8f31cd372..018583741 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -5,13 +5,13 @@ use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use bytes::Bytes; use fallible_iterator::FallibleIterator; use futures::{ready, Stream}; +use pin_project::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; +use std::marker::PhantomPinned; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use std::marker::PhantomPinned; -use pin_project::pin_project; pub async fn simple_query(client: &InnerClient, query: &str) -> Result { let buf = encode(client, query)?; From 7a95f6a3e4bdc334512a35205c4e7462df07e4af Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 18 Oct 2019 22:33:33 -0400 Subject: [PATCH 287/819] pin_mut is in futures now --- tokio-postgres/Cargo.toml | 1 - tokio-postgres/benches/bench.rs | 4 +--- tokio-postgres/src/client.rs | 4 +--- tokio-postgres/src/connect.rs | 4 +--- tokio-postgres/src/copy_in.rs | 4 +--- tokio-postgres/src/prepare.rs | 3 +-- 6 files changed, 5 insertions(+), 15 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 0cd04bd0e..40f62a17b 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -41,7 +41,6 @@ futures-preview = { version = "=0.3.0-alpha.19", features = ["async-await"] } log = "0.4" parking_lot = "0.9" percent-encoding = "1.0" -pin-utils = "=0.1.0-alpha.4" pin-project = "0.4" phf = "0.7.23" postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } diff --git a/tokio-postgres/benches/bench.rs b/tokio-postgres/benches/bench.rs index 4369242fc..fececa2b5 100644 --- a/tokio-postgres/benches/bench.rs +++ b/tokio-postgres/benches/bench.rs @@ -32,9 +32,7 @@ fn query_prepared(c: &mut Criterion) { let (client, runtime) = setup(); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("executor_block_on", move |b| { - b.iter(|| { - executor::block_on(client.query(&statement, &[&1i64])).unwrap() - }) + b.iter(|| executor::block_on(client.query(&statement, &[&1i64])).unwrap()) }); let (client, runtime) = setup(); diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 
e0133bb3b..34d11b5fb 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -21,10 +21,8 @@ use crate::{Error, Statement}; use bytes::{BytesMut, IntoBuf}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::{future, TryStream, TryStreamExt}; -use futures::{ready, StreamExt}; +use futures::{future, pin_mut, ready, StreamExt, TryStream, TryStreamExt}; use parking_lot::Mutex; -use pin_utils::pin_mut; use postgres_protocol::message::backend::Message; use std::collections::HashMap; use std::error; diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 5452d1e7c..db2ddc85f 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -4,9 +4,7 @@ use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; -use futures::{future, Future}; -use futures::{FutureExt, Stream}; -use pin_utils::pin_mut; +use futures::{future, pin_mut, Future, FutureExt, Stream}; use std::io; use std::task::Poll; diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index ec9399290..f4ba95cb7 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -5,9 +5,7 @@ use crate::types::ToSql; use crate::{query, Error, Statement}; use bytes::{Buf, BufMut, BytesMut, IntoBuf}; use futures::channel::mpsc; -use futures::ready; -use futures::{SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; -use pin_utils::pin_mut; +use futures::{pin_mut, ready, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use postgres_protocol::message::frontend::CopyData; diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index c00e9127b..450d3b0bd 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -7,8 +7,7 @@ use crate::{query, slice_iter}; use crate::{Column, Error, Statement}; use bytes::Bytes; use fallible_iterator::FallibleIterator; -use futures::TryStreamExt; -use pin_utils::pin_mut; +use futures::{pin_mut, TryStreamExt}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::future::Future; From a943a0e6666085c1f4acdacda143d79eecf83b4d Mon Sep 17 00:00:00 2001 From: mibac138 <5672750+mibac138@users.noreply.github.com> Date: Sat, 19 Oct 2019 19:37:28 +0200 Subject: [PATCH 288/819] Support uuid 0.8 --- postgres-types/Cargo.toml | 2 ++ postgres-types/src/lib.rs | 2 ++ postgres-types/src/uuid_08.rs | 25 ++++++++++++++++++++++ postgres/CHANGELOG.md | 5 +++++ postgres/Cargo.toml | 1 + tokio-postgres/CHANGELOG.md | 5 +++++ tokio-postgres/Cargo.toml | 3 +++ tokio-postgres/tests/test/types/mod.rs | 2 ++ tokio-postgres/tests/test/types/uuid_08.rs | 18 ++++++++++++++++ 9 files changed, 63 insertions(+) create mode 100644 postgres-types/src/uuid_08.rs create mode 100644 tokio-postgres/tests/test/types/uuid_08.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 6026ea6f2..7ce393624 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -18,6 +18,7 @@ with-eui48-0_4 = ["eui48-04"] with-geo-types-0_4 = ["geo-types-04"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_7 = ["uuid-07"] +with-uuid-0_8 = ["uuid-08"] [dependencies] bytes = "0.4" @@ -32,3 +33,4 @@ geo-types-04 = { version = "0.4", package = "geo-types", optional = true } 
serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-07 = { version = "0.7", package = "uuid", optional = true } +uuid-08 = { version = "0.8", package = "uuid", optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 264442b3f..79144546b 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -200,6 +200,8 @@ mod geo_types_04; mod serde_json_1; #[cfg(feature = "with-uuid-0_7")] mod uuid_07; +#[cfg(feature = "with-uuid-0_8")] +mod uuid_08; #[doc(hidden)] pub mod private; diff --git a/postgres-types/src/uuid_08.rs b/postgres-types/src/uuid_08.rs new file mode 100644 index 000000000..72d5e82fc --- /dev/null +++ b/postgres-types/src/uuid_08.rs @@ -0,0 +1,25 @@ +use bytes::BytesMut; +use postgres_protocol::types; +use std::error::Error; +use uuid_08::Uuid; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for Uuid { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let bytes = types::uuid_from_sql(raw)?; + Ok(Uuid::from_bytes(bytes)) + } + + accepts!(UUID); +} + +impl ToSql for Uuid { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::uuid_to_sql(*self.as_bytes(), w); + Ok(IsNull::No) + } + + accepts!(UUID); + to_sql_checked!(); +} diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 9cd1801b1..1e25bf5bb 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,10 @@ # Change Log +## Unreleased + +### Added +* Added support for converting to and from `uuid` crate v0.8 + ## v0.17.0-alpha.1 - 2019-10-14 ### Changed diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index e75ed4494..2e9726e07 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -26,6 +26,7 @@ with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] with-geo-types-0_4 = ["tokio-postgres/with-geo-types-0_4"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_7 = ["tokio-postgres/with-uuid-0_7"] +with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] [dependencies] bytes = "0.4" diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 7ed4b4dec..51a3bf8b3 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,10 @@ # Change Log +## Unreleased + +### Added +* Added support for converting to and from `uuid` crate v0.8 + ## v0.5.0-alpha.1 - 2019-10-14 ### Changed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 40f62a17b..734f3145e 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -33,6 +33,7 @@ with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] with-geo-types-0_4 = ["postgres-types/with-geo-types-0_4"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_7 = ["postgres-types/with-uuid-0_7"] +with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] [dependencies] bytes = "0.4" @@ -59,3 +60,5 @@ geo-types-04 = { version = "0.4", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } uuid-07 = { version = "0.7", package = "uuid" } +uuid-08 = { version = "0.8", package = "uuid" } + diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 3daabcd87..8d411d54e 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -24,6 +24,8 @@ mod geo_010; mod serde_json_1; #[cfg(feature = "with-uuid-0_7")] mod uuid_07; +#[cfg(feature = "with-uuid-0_8")] +mod 
uuid_08; async fn test_type(sql_type: &str, checks: &[(T, S)]) where diff --git a/tokio-postgres/tests/test/types/uuid_08.rs b/tokio-postgres/tests/test/types/uuid_08.rs new file mode 100644 index 000000000..01b674b97 --- /dev/null +++ b/tokio-postgres/tests/test/types/uuid_08.rs @@ -0,0 +1,18 @@ +use uuid_08::Uuid; + +use crate::types::test_type; + +#[tokio::test] +async fn test_uuid_params() { + test_type( + "UUID", + &[ + ( + Some(Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap()), + "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", + ), + (None, "NULL"), + ], + ) + .await +} From 9a83196e23c65b0c3c4ec5faa92cda001dbb1b08 Mon Sep 17 00:00:00 2001 From: mibac138 <5672750+mibac138@users.noreply.github.com> Date: Sat, 19 Oct 2019 19:53:54 +0200 Subject: [PATCH 289/819] Update dependencies --- codegen/Cargo.toml | 6 +++--- codegen/src/sqlstate.rs | 16 +++++++--------- postgres-protocol/Cargo.toml | 4 ++-- tokio-postgres/Cargo.toml | 6 +++--- 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index c49248844..a536cbd61 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -phf_codegen = "0.7.22" -regex = "0.1" +phf_codegen = "0.8" +regex = "1.3.1" marksman_escape = "0.1" -linked-hash-map = "0.4" +linked-hash-map = "0.5" diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index 3d7a61430..79a696382 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -82,17 +82,15 @@ fn make_consts(codes: &LinkedHashMap>, file: &mut BufWriter< } fn make_map(codes: &LinkedHashMap>, file: &mut BufWriter) { - write!( - file, - " -#[rustfmt::skip] -static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = " - ) - .unwrap(); let mut builder = phf_codegen::Map::new(); for (code, names) in codes { builder.entry(&**code, &format!("SqlState::{}", &names[0])); } - builder.build(file).unwrap(); - writeln!(file, ";").unwrap(); + write!( + file, + " +#[rustfmt::skip] +static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = \n{};\n", + builder.build() + ).unwrap(); } diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index bb34c0a96..13d13c435 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -13,10 +13,10 @@ base64 = "0.10" byteorder = "1.0" bytes = "0.4" fallible-iterator = "0.2" -generic-array = "0.12" +generic-array = "0.13" hmac = "0.7" md5 = "0.6" memchr = "2.0" -rand = "0.6" +rand = "0.7" sha2 = "0.8" stringprep = "0.1" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 734f3145e..f255cab45 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -41,16 +41,16 @@ fallible-iterator = "0.2" futures-preview = { version = "=0.3.0-alpha.19", features = ["async-await"] } log = "0.4" parking_lot = "0.9" -percent-encoding = "1.0" +percent-encoding = "2.0" pin-project = "0.4" -phf = "0.7.23" +phf = "0.8" postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } postgres-types = { version = "=0.1.0-alpha.1", path = "../postgres-types" } tokio = { version = "=0.2.0-alpha.6", default-features = false, features = ["io", "codec"] } [dev-dependencies] tokio = "=0.2.0-alpha.6" -env_logger = "0.5" +env_logger = "0.7" criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } From 286f41d423f5baff5bc4a085ad41e569c35328da Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 21 Oct 2019 18:04:58 -0700 Subject: [PATCH 290/819] Remove out of 
date README Each individual subcrate will gain its own README for display on crates.io --- README.md | 361 +++--------------------------------------------------- 1 file changed, 17 insertions(+), 344 deletions(-) diff --git a/README.md b/README.md index 3a7cf7e2a..cbe7182c6 100644 --- a/README.md +++ b/README.md @@ -1,361 +1,34 @@ # Rust-Postgres -[![CircleCI](https://circleci.com/gh/sfackler/rust-postgres.svg?style=shield)](https://circleci.com/gh/sfackler/rust-postgres) [![Latest Version](https://img.shields.io/crates/v/postgres.svg)](https://crates.io/crates/postgres) +[![CircleCI](https://circleci.com/gh/sfackler/rust-postgres.svg?style=shield)](https://circleci.com/gh/sfackler/rust-postgres) -A native PostgreSQL driver for Rust. +PostgreSQL support for Rust. -[Documentation](https://docs.rs/postgres) - -You can integrate Rust-Postgres into your project through the [releases on crates.io](https://crates.io/crates/postgres): -```toml -[dependencies] -postgres = "0.15" -``` - -## Overview -Rust-Postgres is a pure-Rust frontend for the popular PostgreSQL database. -```rust -extern crate postgres; - -use postgres::{Connection, TlsMode}; - -struct Person { - id: i32, - name: String, - data: Option>, -} - -fn main() { - let conn = Connection::connect("postgres://postgres@localhost:5433", TlsMode::None).unwrap(); - conn.execute("CREATE TABLE person ( - id SERIAL PRIMARY KEY, - name VARCHAR NOT NULL, - data BYTEA - )", &[]).unwrap(); - let me = Person { - id: 0, - name: "Steven".to_string(), - data: None, - }; - conn.execute("INSERT INTO person (name, data) VALUES ($1, $2)", - &[&me.name, &me.data]).unwrap(); - for row in &conn.query("SELECT id, name, data FROM person", &[]).unwrap() { - let person = Person { - id: row.get(0), - name: row.get(1), - data: row.get(2), - }; - println!("Found person {}: {}", person.id, person.name); - } -} -``` - -## Requirements -* **Rust** - Rust-Postgres is developed against the 1.18 release of Rust - available on http://www.rust-lang.org. It should also compile against more - recent releases. - -* **PostgreSQL 7.4 or later** - Rust-Postgres speaks version 3 of the - PostgreSQL protocol, which corresponds to versions 7.4 and later. If your - version of Postgres was compiled in the last decade, you should be okay. - -## Usage - -### Connecting -Connect to a Postgres server using the standard URI format: -```rust -let conn = Connection::connect("postgres://user:pass@host:port/database?arg1=val1&arg2=val2", - TlsMode::None)?; -``` -`pass` may be omitted if not needed. `port` defaults to `5432` and `database` -defaults to the value of `user` if not specified. The driver supports `trust`, -`password`, and `md5` authentication. - -Unix domain sockets can be used as well. The `host` portion of the URI should -be set to the absolute path to the directory containing the socket file. Since -`/` is a reserved character in URLs, the path should be URL encoded. If Postgres -stored its socket files in `/run/postgres`, the connection would then look like: -```rust -let conn = Connection::connect("postgres://postgres@%2Frun%2Fpostgres", TlsMode::None)?; -``` -Paths which contain non-UTF8 characters can be handled in a different manner; -see the documentation for details. - -### Querying -SQL statements can be executed with the `query` and `execute` methods. Both -methods take a query string as well as a slice of parameters to bind to the -query. The `i`th query parameter is specified in the query string by `$i`. 
Note -that query parameters are 1-indexed rather than the more common 0-indexing. - -`execute` returns the number of rows affected by the query (or 0 if not -applicable): -```rust -let updates = conn.execute("UPDATE foo SET bar = $1 WHERE baz = $2", &[&1i32, &"biz"])?; -println!("{} rows were updated", updates); -``` - -`query` returns an iterable object holding the rows returned from the database. -The fields in a row can be accessed either by their indices or their column -names, though access by index is more efficient. Unlike statement parameters, -result columns are zero-indexed. -```rust -for row in &conn.query("SELECT bar, baz FROM foo WHERE buz = $1", &[&1i32])? { - let bar: i32 = row.get(0); - let baz: String = row.get("baz"); - println!("bar: {}, baz: {}", bar, baz); -} -``` - -### Statement Preparation -If the same statement will be executed repeatedly (possibly with different -parameters), explicitly preparing it can improve performance: - -```rust -let stmt = conn.prepare("UPDATE foo SET bar = $1 WHERE baz = $2")?; -for (bar, baz) in updates { - stmt.execute(&[bar, baz])?; -} -``` - -### Transactions -The `transaction` method will start a new transaction. It returns a -`Transaction` object which has the functionality of a -`Connection` as well as methods to control the result of the -transaction: -```rust -let trans = conn.transaction()?; +## postgres [![Latest Version](https://img.shields.io/crates/v/postgres.svg)](https://crates.io/crates/postgres) -trans.execute(...)?; -let stmt = trans.prepare(...)?; -// ... - -trans.commit()?; -``` -The transaction will be active until the `Transaction` object falls out of -scope. A transaction will roll back by default. Nested transactions are -supported via savepoints. - -### Type Correspondence -Rust-Postgres enforces a strict correspondence between Rust types and Postgres -types. The driver currently supports the following conversions: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Rust Type | Postgres Type |
|---|---|
| bool | BOOL |
| i8 | "char" |
| i16 | SMALLINT, SMALLSERIAL |
| i32 | INT, SERIAL |
| u32 | OID |
| i64 | BIGINT, BIGSERIAL |
| f32 | REAL |
| f64 | DOUBLE PRECISION |
| str/String | VARCHAR, CHAR(n), TEXT, CITEXT, NAME |
| [u8]/Vec<u8> | BYTEA |
| postgres::types::Json and serde_json::Value (optional) | JSON, JSONB |
| time::Timespec and chrono::NaiveDateTime (optional) | TIMESTAMP |
| time::Timespec, chrono::DateTime<Utc>, chrono::DateTime<Local>, and chrono::DateTime<FixedOffset> (optional) | TIMESTAMP WITH TIME ZONE |
| chrono::NaiveDate (optional) | DATE |
| chrono::NaiveTime (optional) | TIME |
| uuid::Uuid (optional) | UUID |
| bit_vec::BitVec (optional) | BIT, VARBIT |
| HashMap<String, Option<String>> | HSTORE |
| eui48::MacAddress (optional) | MACADDR |
| geo::Point<f64> (optional) | POINT |
| geo::Bbox<f64> (optional) | BOX |
| geo::LineString<f64> (optional) | PATH |
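For instance, a minimal illustrative sketch of how a few of these mappings look with the `Connection` API shown above (the `items` table and its columns are hypothetical, not taken from this patch):

```rust
use std::collections::HashMap;

// Hypothetical table: items (id INT, labels HSTORE, payload BYTEA)
for row in &conn.query("SELECT id, labels, payload FROM items", &[])? {
    let id: i32 = row.get(0);                                 // INT -> i32
    let labels: HashMap<String, Option<String>> = row.get(1); // HSTORE
    let payload: Option<Vec<u8>> = row.get(2);                // nullable BYTEA
    println!("{} has {} labels, payload: {:?}", id, labels.len(), payload.map(|p| p.len()));
}
```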
- -`Option` implements `FromSql` where `T: FromSql` and `ToSql` where `T: -ToSql`, and represents nullable Postgres values. - -`&[T]` and `Vec` implement `ToSql` where `T: ToSql`, and `Vec` -additionally implements `FromSql` where `T: FromSql`, which represent -one-dimensional Postgres arrays. - -More conversions can be defined by implementing the `ToSql` and `FromSql` -traits. - -The [postgres-derive](https://github.com/sfackler/rust-postgres-derive) -crate will synthesize `ToSql` and `FromSql` implementations for enum, domain, -and composite Postgres types. - -Full support for array types is located in the -[postgres-array](https://github.com/sfackler/rust-postgres-array) crate. - -Support for range types is located in the -[postgres-range](https://github.com/sfackler/rust-postgres-range) crate. - -Support for the large object API is located in the -[postgres-large-object](https://github.com/sfackler/rust-postgres-large-object) -crate. - -## Optional features - -### UUID type - -[UUID](http://www.postgresql.org/docs/9.4/static/datatype-uuid.html) support is -provided optionally by the `with-uuid` feature, which adds `ToSql` and `FromSql` -implementations for `uuid`'s `Uuid` type. Requires `uuid` version 0.5. - -### JSON/JSONB types - -[JSON and JSONB](http://www.postgresql.org/docs/9.4/static/datatype-json.html) -support is provided optionally by the `with-serde_json-1` feature, which adds -`ToSql` and `FromSql` implementations for `serde_json`'s `Value` type, -as well as adding a generic `Json` type with those same implementations. -Requires `serde_json` version 1.0. - -### TIMESTAMP/TIMESTAMPTZ/DATE/TIME types +[Documentation](https://docs.rs/postgres) -[Date and Time](http://www.postgresql.org/docs/9.1/static/datatype-datetime.html) -support is provided optionally by the `with-time` feature, which adds `ToSql` -and `FromSql` implementations for `time`'s `Timespec` type, or the `with-chrono` -feature, which adds `ToSql` and `FromSql` implementations for `chrono`'s -`DateTime`, `NaiveDateTime`, `NaiveDate` and `NaiveTime` types. Requires `time` version 0.1.14. +A native, synchronous PostgreSQL client. -### BIT/VARBIT types +## tokio-postgres [![Latest Version](https://img.shields.io/crates/v/tokio-postgres.svg)](https://crates.io/crates/tokio-postgres) -[BIT and VARBIT](http://www.postgresql.org/docs/9.4/static/datatype-bit.html) -support is provided optionally by the `with-bit-vec` feature, which adds `ToSql` -and `FromSql` implementations for `bit-vec`'s `BitVec` type. Requires `bit-vec` version 0.4. +[Documentation](https://docs.rs/tokio-postgres) -### MACADDR type +A native, asynchronous PostgreSQL client. -[MACADDR](http://www.postgresql.org/docs/9.4/static/datatype-net-types.html#DATATYPE-MACADDR) -support is provided optionally by the `with-eui48` feature, which adds `ToSql` -and `FromSql` implementations for `eui48`'s `MacAddress` type. Requires `eui48` version 0.3. +## postgres-types [![Latest Version](https://img.shields.io/crates/v/postgres-types.svg)](https://crates.io/crates/postgres-types) -### POINT type +[Documentation](https://docs.rs/postgres-types) -[POINT](https://www.postgresql.org/docs/9.4/static/datatype-geometric.html#AEN6799) -support is provided optionally by the `with-geo` feature, which adds `ToSql` and `FromSql` implementations for `geo`'s `Point` type. Requires `geo` version 0.4. +Conversions between Rust and Postgres types. 
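As a rough illustration of those trait-based conversions with the asynchronous API, a minimal sketch (assuming an already-connected tokio-postgres `client` in an async context; the query itself is made up):

```rust
// Parameters are converted through ToSql, results come back through FromSql.
let row = client
    .query_one("SELECT $1::INT4 + 1, upper($2::TEXT)", &[&41i32, &"hi"])
    .await?;
let sum: i32 = row.get(0);       // INT4 -> i32
let shout: String = row.get(1);  // TEXT -> String
assert_eq!((sum, shout.as_str()), (42, "HI"));
```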
-### BOX type +## postgres-native-tls [![Latest Version](https://img.shields.io/crates/v/postgres-native-tls.svg)](https://crates.io/crates/postgres-native-tls) -[BOX](https://www.postgresql.org/docs/9.4/static/datatype-geometric.html#AEN6883) -support is provided optionally by the `with-geo` feature, which adds `ToSql` and `FromSql` implementations for `geo`'s `Bbox` type. Requires `geo` version 0.4. +[Documentation](https://docs.rs/postgres-native-tls) -### PATH type +TLS support for postgres and tokio-postgres via native-tls. -[PATH](https://www.postgresql.org/docs/9.4/static/datatype-geometric.html#AEN6912) -support is provided optionally by the `with-geo` feature, which adds `ToSql` and `FromSql` implementations for `geo`'s `LineString` type. -Paths converted from LineString are always treated as "open" paths. Requires `geo` version 0.4. Use the -[pclose](https://www.postgresql.org/docs/8.2/static/functions-geometry.html#FUNCTIONS-GEOMETRY-FUNC-TABLE) -geometric function to insert a closed path. +## postgres-openssl [![Latest Version](https://img.shields.io/crates/v/postgres-openssl.svg)](https://crates.io/crates/postgres-openssl) -## See Also +[Documentation](https://docs.rs/postgres-openssl) -- [r2d2-postgres](https://github.com/sfackler/r2d2-postgres) for connection pool support. +TLS support for postgres and tokio-postgres via openssl. From 0b5ca1bd68e5f42ccf2c559a286860104c13c3ad Mon Sep 17 00:00:00 2001 From: Ivan Kozik Date: Sun, 27 Oct 2019 13:04:29 +0000 Subject: [PATCH 291/819] Add query_one to postgres::Transaction I believe this was missed in 31855141d2eac62c0b3fff34a5825602c177693d --- postgres/src/transaction.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 7066a3e81..8b857bb06 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -58,6 +58,14 @@ impl<'a> Transaction<'a> { executor::block_on(self.0.query(query, params)) } + /// Like `Client::query_one`. + pub fn query_one(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement, + { + executor::block_on(self.0.query_one(query, params)) + } + /// Like `Client::query_raw`. pub fn query_raw<'b, T, I>( &mut self, From dc9d07e2460d43c089fa8914f80cfdf2cc38d73b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 27 Oct 2019 14:24:25 -0700 Subject: [PATCH 292/819] Return a custom TlsStream rather than a ChannelBinding up front --- codegen/src/sqlstate.rs | 3 +- postgres-native-tls/src/lib.rs | 93 ++++++++++++++++--- postgres-openssl/src/lib.rs | 101 ++++++++++++++++++--- tokio-postgres/src/cancel_query_raw.rs | 2 +- tokio-postgres/src/connect_raw.rs | 24 +++-- tokio-postgres/src/connect_tls.rs | 14 +-- tokio-postgres/src/maybe_tls_stream.rs | 14 +++ tokio-postgres/src/tls.rs | 20 +++- tokio-postgres/tests/test/types/uuid_08.rs | 2 +- 9 files changed, 220 insertions(+), 53 deletions(-) diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index 79a696382..bb21be34f 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -92,5 +92,6 @@ fn make_map(codes: &LinkedHashMap>, file: &mut BufWriter = \n{};\n", builder.build() - ).unwrap(); + ) + .unwrap(); } diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index f9e67fa0d..add9ea8a8 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -7,7 +7,7 @@ //! use postgres_native_tls::MakeTlsConnector; //! use std::fs; //! -//! # fn main() -> Result<(), Box> { +//! 
# fn main() -> Result<(), Box> { //! let cert = fs::read("database_cert.pem")?; //! let cert = Certificate::from_pem(&cert)?; //! let connector = TlsConnector::builder() @@ -30,7 +30,7 @@ //! use postgres_native_tls::MakeTlsConnector; //! use std::fs; //! -//! # fn main() -> Result<(), Box> { +//! # fn main() -> Result<(), Box> { //! let cert = fs::read("database_cert.pem")?; //! let cert = Certificate::from_pem(&cert)?; //! let connector = TlsConnector::builder() @@ -48,13 +48,16 @@ #![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] +use futures::task::Context; +use futures::Poll; use std::future::Future; +use std::io; use std::pin::Pin; -use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_io::{AsyncRead, AsyncWrite, Buf, BufMut}; +use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; -use tokio_tls::TlsStream; #[cfg(test)] mod test; @@ -111,20 +114,88 @@ where type Stream = TlsStream; type Error = native_tls::Error; #[allow(clippy::type_complexity)] - type Future = Pin< - Box, ChannelBinding), native_tls::Error>> + Send>, - >; + type Future = Pin, native_tls::Error>> + Send>>; fn connect(self, stream: S) -> Self::Future { let future = async move { let stream = self.connector.connect(&self.domain, stream).await?; - // FIXME https://github.com/tokio-rs/tokio/issues/1383 - let channel_binding = ChannelBinding::none(); - - Ok((stream, channel_binding)) + Ok(TlsStream(stream)) }; Box::pin(future) } } + +/// The stream returned by `TlsConnector`. +pub struct TlsStream(tokio_tls::TlsStream); + +impl AsyncRead for TlsStream +where + S: AsyncRead + AsyncWrite + Unpin, +{ + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.0.prepare_uninitialized_buffer(buf) + } + + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut self.0).poll_read(cx, buf) + } + + fn poll_read_buf( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> + where + Self: Sized, + { + Pin::new(&mut self.0).poll_read_buf(cx, buf) + } +} + +impl AsyncWrite for TlsStream +where + S: AsyncRead + AsyncWrite + Unpin, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.0).poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.0).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.0).poll_shutdown(cx) + } + + fn poll_write_buf( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> + where + Self: Sized, + { + Pin::new(&mut self.0).poll_write_buf(cx, buf) + } +} + +impl tls::TlsStream for TlsStream +where + S: AsyncRead + AsyncWrite + Unpin, +{ + fn channel_binding(&self) -> ChannelBinding { + // FIXME https://github.com/tokio-rs/tokio/issues/1383 + ChannelBinding::none() + } +} diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 2be536d28..a6d27d631 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -6,7 +6,7 @@ //! use openssl::ssl::{SslConnector, SslMethod}; //! use postgres_openssl::MakeTlsConnector; //! -//! # fn main() -> Result<(), Box> { +//! # fn main() -> Result<(), Box> { //! let mut builder = SslConnector::builder(SslMethod::tls())?; //! 
builder.set_ca_file("database_cert.pem")?; //! let connector = MakeTlsConnector::new(builder.build()); @@ -25,7 +25,7 @@ //! use openssl::ssl::{SslConnector, SslMethod}; //! use postgres_openssl::MakeTlsConnector; //! -//! # fn main() -> Result<(), Box> { +//! # fn main() -> Result<(), Box> { //! let mut builder = SslConnector::builder(SslMethod::tls())?; //! builder.set_ca_file("database_cert.pem")?; //! let connector = MakeTlsConnector::new(builder.build()); @@ -42,6 +42,8 @@ #![doc(html_root_url = "https://docs.rs/postgres-openssl/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] +use futures::task::Context; +use futures::Poll; #[cfg(feature = "runtime")] use openssl::error::ErrorStack; use openssl::hash::MessageDigest; @@ -51,11 +53,13 @@ use openssl::ssl::SslConnector; use openssl::ssl::{ConnectConfiguration, SslRef}; use std::fmt::Debug; use std::future::Future; +use std::io; use std::pin::Pin; #[cfg(feature = "runtime")] use std::sync::Arc; -use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_io::{AsyncRead, AsyncWrite, Buf, BufMut}; use tokio_openssl::{HandshakeError, SslStream}; +use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; @@ -99,7 +103,7 @@ impl MakeTlsConnect for MakeTlsConnector where S: AsyncRead + AsyncWrite + Unpin + Debug + 'static + Sync + Send, { - type Stream = SslStream; + type Stream = TlsStream; type TlsConnect = TlsConnector; type Error = ErrorStack; @@ -130,29 +134,96 @@ impl TlsConnect for TlsConnector where S: AsyncRead + AsyncWrite + Unpin + Debug + 'static + Sync + Send, { - type Stream = SslStream; + type Stream = TlsStream; type Error = HandshakeError; #[allow(clippy::type_complexity)] - type Future = Pin< - Box, ChannelBinding), HandshakeError>> + Send>, - >; + type Future = Pin, HandshakeError>> + Send>>; fn connect(self, stream: S) -> Self::Future { let future = async move { let stream = tokio_openssl::connect(self.ssl, &self.domain, stream).await?; - - let channel_binding = match tls_server_end_point(stream.ssl()) { - Some(buf) => ChannelBinding::tls_server_end_point(buf), - None => ChannelBinding::none(), - }; - - Ok((stream, channel_binding)) + Ok(TlsStream(stream)) }; Box::pin(future) } } +/// The stream returned by `TlsConnector`. 
+pub struct TlsStream(SslStream); + +impl AsyncRead for TlsStream +where + S: AsyncRead + AsyncWrite + Unpin, +{ + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + self.0.prepare_uninitialized_buffer(buf) + } + + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut self.0).poll_read(cx, buf) + } + + fn poll_read_buf( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> + where + Self: Sized, + { + Pin::new(&mut self.0).poll_read_buf(cx, buf) + } +} + +impl AsyncWrite for TlsStream +where + S: AsyncRead + AsyncWrite + Unpin, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.0).poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.0).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.0).poll_shutdown(cx) + } + + fn poll_write_buf( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll> + where + Self: Sized, + { + Pin::new(&mut self.0).poll_write_buf(cx, buf) + } +} + +impl tls::TlsStream for TlsStream +where + S: AsyncRead + AsyncWrite + Unpin, +{ + fn channel_binding(&self) -> ChannelBinding { + match tls_server_end_point(self.0.ssl()) { + Some(buf) => ChannelBinding::tls_server_end_point(buf), + None => ChannelBinding::none(), + } + } +} + fn tls_server_end_point(ssl: &SslRef) -> Option> { let cert = ssl.peer_certificate()?; let algo_nid = cert.signature_algorithm().object().nid(); diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index 0dcdd8ba9..c89dc581f 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -16,7 +16,7 @@ where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - let (mut stream, _) = connect_tls::connect_tls(stream, mode, tls).await?; + let mut stream = connect_tls::connect_tls(stream, mode, tls).await?; let mut buf = BytesMut::new(); frontend::cancel_request(process_id, secret_key, &mut buf); diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index b96ced036..90fb4165e 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -2,7 +2,7 @@ use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCod use crate::config::{self, Config}; use crate::connect_tls::connect_tls; use crate::maybe_tls_stream::MaybeTlsStream; -use crate::tls::{ChannelBinding, TlsConnect}; +use crate::tls::{TlsConnect, TlsStream}; use crate::{Client, Connection, Error}; use bytes::BytesMut; use fallible_iterator::FallibleIterator; @@ -86,7 +86,7 @@ where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - let (stream, channel_binding) = connect_tls(stream, config.ssl_mode, tls).await?; + let stream = connect_tls(stream, config.ssl_mode, tls).await?; let mut stream = StartupStream { inner: Framed::new(stream, PostgresCodec), @@ -94,7 +94,7 @@ where }; startup(&mut stream, config).await?; - authenticate(&mut stream, channel_binding, config).await?; + authenticate(&mut stream, config).await?; let (process_id, secret_key, parameters) = read_info(&mut stream).await?; let (sender, receiver) = mpsc::unbounded(); @@ -132,14 +132,10 @@ where .map_err(Error::io) } -async fn authenticate( - stream: &mut StartupStream, - channel_binding: ChannelBinding, - config: &Config, -) -> Result<(), Error> 
+async fn authenticate(stream: &mut StartupStream, config: &Config) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, - T: AsyncRead + AsyncWrite + Unpin, + T: TlsStream + Unpin, { match stream.try_next().await.map_err(Error::io)? { Some(Message::AuthenticationOk) => { @@ -172,7 +168,7 @@ where authenticate_password(stream, output.as_bytes()).await?; } Some(Message::AuthenticationSasl(body)) => { - authenticate_sasl(stream, body, channel_binding, config).await?; + authenticate_sasl(stream, body, config).await?; } Some(Message::AuthenticationKerberosV5) | Some(Message::AuthenticationScmCredential) @@ -225,12 +221,11 @@ where async fn authenticate_sasl( stream: &mut StartupStream, body: AuthenticationSaslBody, - channel_binding: ChannelBinding, config: &Config, ) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, - T: AsyncRead + AsyncWrite + Unpin, + T: TlsStream + Unpin, { let password = config .password @@ -248,7 +243,10 @@ where } } - let channel_binding = channel_binding + let channel_binding = stream + .inner + .get_ref() + .channel_binding() .tls_server_end_point .filter(|_| config.channel_binding != config::ChannelBinding::Disable) .map(sasl::ChannelBinding::tls_server_end_point); diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs index d03357b46..03aaa0bc0 100644 --- a/tokio-postgres/src/connect_tls.rs +++ b/tokio-postgres/src/connect_tls.rs @@ -1,7 +1,7 @@ use crate::config::SslMode; use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::private::ForcePrivateApi; -use crate::tls::{ChannelBinding, TlsConnect}; +use crate::tls::TlsConnect; use crate::Error; use bytes::BytesMut; use postgres_protocol::message::frontend; @@ -11,15 +11,15 @@ pub async fn connect_tls( mut stream: S, mode: SslMode, tls: T, -) -> Result<(MaybeTlsStream, ChannelBinding), Error> +) -> Result, Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { match mode { - SslMode::Disable => return Ok((MaybeTlsStream::Raw(stream), ChannelBinding::none())), + SslMode::Disable => return Ok(MaybeTlsStream::Raw(stream)), SslMode::Prefer if !tls.can_connect(ForcePrivateApi) => { - return Ok((MaybeTlsStream::Raw(stream), ChannelBinding::none())) + return Ok(MaybeTlsStream::Raw(stream)) } SslMode::Prefer | SslMode::Require => {} SslMode::__NonExhaustive => unreachable!(), @@ -36,14 +36,14 @@ where if SslMode::Require == mode { return Err(Error::tls("server does not support TLS".into())); } else { - return Ok((MaybeTlsStream::Raw(stream), ChannelBinding::none())); + return Ok(MaybeTlsStream::Raw(stream)); } } - let (stream, channel_binding) = tls + let stream = tls .connect(stream) .await .map_err(|e| Error::tls(e.into()))?; - Ok((MaybeTlsStream::Tls(stream), channel_binding)) + Ok(MaybeTlsStream::Tls(stream)) } diff --git a/tokio-postgres/src/maybe_tls_stream.rs b/tokio-postgres/src/maybe_tls_stream.rs index 9928cef42..a8f0d3a6b 100644 --- a/tokio-postgres/src/maybe_tls_stream.rs +++ b/tokio-postgres/src/maybe_tls_stream.rs @@ -1,3 +1,4 @@ +use crate::tls::{ChannelBinding, TlsStream}; use bytes::{Buf, BufMut}; use std::io; use std::pin::Pin; @@ -93,3 +94,16 @@ where } } } + +impl TlsStream for MaybeTlsStream +where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsStream + Unpin, +{ + fn channel_binding(&self) -> ChannelBinding { + match self { + MaybeTlsStream::Raw(_) => ChannelBinding::none(), + MaybeTlsStream::Tls(s) => s.channel_binding(), + } + } +} diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 78940f337..4e852d3f9 100644 --- 
a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -38,7 +38,7 @@ impl ChannelBinding { #[cfg(feature = "runtime")] pub trait MakeTlsConnect { /// The stream type created by the `TlsConnect` implementation. - type Stream: AsyncRead + AsyncWrite + Unpin; + type Stream: TlsStream + Unpin; /// The `TlsConnect` implementation created by this type. type TlsConnect: TlsConnect; /// The error type returned by the `TlsConnect` implementation. @@ -53,11 +53,11 @@ pub trait MakeTlsConnect { /// An asynchronous function wrapping a stream in a TLS session. pub trait TlsConnect { /// The stream returned by the future. - type Stream: AsyncRead + AsyncWrite + Unpin; + type Stream: TlsStream + Unpin; /// The error returned by the future. type Error: Into>; /// The future returned by the connector. - type Future: Future>; + type Future: Future>; /// Returns a future performing a TLS handshake over the stream. fn connect(self, stream: S) -> Self::Future; @@ -68,6 +68,12 @@ pub trait TlsConnect { } } +/// A TLS-wrapped connection to a PostgreSQL database. +pub trait TlsStream: AsyncRead + AsyncWrite { + /// Returns channel binding information for the session. + fn channel_binding(&self) -> ChannelBinding; +} + /// A `MakeTlsConnect` and `TlsConnect` implementation which simply returns an error. /// /// This can be used when `sslmode` is `none` or `prefer`. @@ -103,7 +109,7 @@ impl TlsConnect for NoTls { pub struct NoTlsFuture(()); impl Future for NoTlsFuture { - type Output = Result<(NoTlsStream, ChannelBinding), NoTlsError>; + type Output = Result; fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll { Poll::Ready(Err(NoTlsError(()))) @@ -139,6 +145,12 @@ impl AsyncWrite for NoTlsStream { } } +impl TlsStream for NoTlsStream { + fn channel_binding(&self) -> ChannelBinding { + match *self {} + } +} + /// The error returned by `NoTls`. #[derive(Debug)] pub struct NoTlsError(()); diff --git a/tokio-postgres/tests/test/types/uuid_08.rs b/tokio-postgres/tests/test/types/uuid_08.rs index 01b674b97..237643782 100644 --- a/tokio-postgres/tests/test/types/uuid_08.rs +++ b/tokio-postgres/tests/test/types/uuid_08.rs @@ -14,5 +14,5 @@ async fn test_uuid_params() { (None, "NULL"), ], ) - .await + .await } From a9f371f70c7d9747879df97bf4828fec55200c9b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Nov 2019 09:56:38 -0800 Subject: [PATCH 293/819] Clean up tokio-postgres example a bit --- tokio-postgres/src/lib.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 3124d2bf3..c2467a783 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -3,7 +3,6 @@ //! # Example //! //! ```no_run -//! use futures::FutureExt; //! use tokio_postgres::{NoTls, Error, Row}; //! //! # #[cfg(not(feature = "runtime"))] fn main() {} @@ -16,18 +15,17 @@ //! //! // The connection object performs the actual communication with the database, //! // so spawn it off to run on its own. -//! let connection = connection.map(|r| { -//! if let Err(e) = r { +//! tokio::spawn(async move { +//! if let Err(e) = connection.await { //! eprintln!("connection error: {}", e); //! } //! }); -//! tokio::spawn(connection); //! //! // Now we can prepare a simple statement that just returns its parameter. //! let stmt = client.prepare("SELECT $1::TEXT").await?; //! -//! // And then execute it, returning a Stream of Rows which we collect into a Vec. -//! let rows: Vec = client +//! 
// And then execute it, returning a list of the resulting rows. +//! let rows = client //! .query(&stmt, &[&"hello world"]) //! .await?; //! From ba39e2f53b5f1b6967c646a97eb446e0ca09473d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Nov 2019 11:55:59 -0800 Subject: [PATCH 294/819] more example fixes --- tokio-postgres/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index c2467a783..df531af02 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -3,14 +3,14 @@ //! # Example //! //! ```no_run -//! use tokio_postgres::{NoTls, Error, Row}; +//! use tokio_postgres::{NoTls, Error}; //! //! # #[cfg(not(feature = "runtime"))] fn main() {} //! # #[cfg(feature = "runtime")] //! #[tokio::main] // By default, tokio_postgres uses the tokio crate as its runtime. //! async fn main() -> Result<(), Error> { //! // Connect to the database. -//! let (mut client, connection) = +//! let (client, connection) = //! tokio_postgres::connect("host=localhost user=postgres", NoTls).await?; //! //! // The connection object performs the actual communication with the database, From 2f31e5826d34451d99e98e2fd57ea51b14839157 Mon Sep 17 00:00:00 2001 From: Aaron Loucks Date: Sat, 16 Nov 2019 18:59:39 -0500 Subject: [PATCH 295/819] Change connection initialization timezone to UTC --- tokio-postgres/src/connect_raw.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 90fb4165e..b6a8d8d6e 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -109,7 +109,7 @@ where S: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin, { - let mut params = vec![("client_encoding", "UTF8"), ("timezone", "GMT")]; + let mut params = vec![("client_encoding", "UTF8"), ("timezone", "UTC")]; if let Some(user) = &config.user { params.push(("user", &**user)); } From cff1189cda0c0aafabb03a8363302cf4e0c7226a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 17 Nov 2019 09:16:41 -0800 Subject: [PATCH 296/819] Include column ID in error Closes #514 --- tokio-postgres/src/error/mod.rs | 10 +++++----- tokio-postgres/src/row.rs | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 695e5923d..4dde62f7f 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -336,7 +336,7 @@ enum Kind { Tls, ToSql(usize), FromSql(usize), - Column, + Column(String), CopyInStream, Closed, Db, @@ -369,13 +369,13 @@ impl fmt::Debug for Error { impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.0.kind { + match &self.0.kind { Kind::Io => fmt.write_str("error communicating with the server")?, Kind::UnexpectedMessage => fmt.write_str("unexpected message from server")?, Kind::Tls => fmt.write_str("error performing TLS handshake")?, Kind::ToSql(idx) => write!(fmt, "error serializing parameter {}", idx)?, Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, - Kind::Column => fmt.write_str("invalid column")?, + Kind::Column(column) => write!(fmt, "invalid column `{}`", column)?, Kind::CopyInStream => fmt.write_str("error from a copy_in stream")?, Kind::Closed => fmt.write_str("connection closed")?, Kind::Db => fmt.write_str("db error")?, @@ -454,8 +454,8 @@ impl Error { Error::new(Kind::FromSql(idx), Some(e)) } - pub(crate) 
fn column() -> Error { - Error::new(Kind::Column, None) + pub(crate) fn column(column: String) -> Error { + Error::new(Kind::Column(column), None) } pub(crate) fn copy_in_stream(e: E) -> Error diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 825f4ee35..03c7635b2 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -146,7 +146,7 @@ impl Row { /// Like `Row::get`, but returns a `Result` rather than panicking. pub fn try_get<'a, I, T>(&'a self, idx: I) -> Result where - I: RowIndex, + I: RowIndex + fmt::Display, T: FromSql<'a>, { self.get_inner(&idx) @@ -154,12 +154,12 @@ impl Row { fn get_inner<'a, I, T>(&'a self, idx: &I) -> Result where - I: RowIndex, + I: RowIndex + fmt::Display, T: FromSql<'a>, { let idx = match idx.__idx(self.columns()) { Some(idx) => idx, - None => return Err(Error::column()), + None => return Err(Error::column(idx.to_string())), }; let ty = self.columns()[idx].type_(); @@ -223,18 +223,18 @@ impl SimpleQueryRow { /// Like `SimpleQueryRow::get`, but returns a `Result` rather than panicking. pub fn try_get(&self, idx: I) -> Result, Error> where - I: RowIndex, + I: RowIndex + fmt::Display, { self.get_inner(&idx) } fn get_inner(&self, idx: &I) -> Result, Error> where - I: RowIndex, + I: RowIndex + fmt::Display, { let idx = match idx.__idx(&self.columns) { Some(idx) => idx, - None => return Err(Error::column()), + None => return Err(Error::column(idx.to_string())), }; let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); From 8ebe8591830f9af19ed8e2e40c29f41777f4ebd6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 17 Nov 2019 18:28:12 -0800 Subject: [PATCH 297/819] Start on binary copy rewrite --- Cargo.toml | 1 + tokio-postgres-binary-copy/Cargo.toml | 17 ++++ tokio-postgres-binary-copy/src/lib.rs | 123 +++++++++++++++++++++++++ tokio-postgres-binary-copy/src/test.rs | 101 ++++++++++++++++++++ 4 files changed, 242 insertions(+) create mode 100644 tokio-postgres-binary-copy/Cargo.toml create mode 100644 tokio-postgres-binary-copy/src/lib.rs create mode 100644 tokio-postgres-binary-copy/src/test.rs diff --git a/Cargo.toml b/Cargo.toml index 4752836a7..4d8dbe78d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "postgres-protocol", "postgres-types", "tokio-postgres", + "tokio-postgres-binary-copy", ] [profile.release] diff --git a/tokio-postgres-binary-copy/Cargo.toml b/tokio-postgres-binary-copy/Cargo.toml new file mode 100644 index 000000000..e6a70bafa --- /dev/null +++ b/tokio-postgres-binary-copy/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tokio-postgres-binary-copy" +version = "0.1.0" +authors = ["Steven Fackler "] +edition = "2018" + +[dependencies] +bytes = "0.4" +futures-preview = "=0.3.0-alpha.19" +parking_lot = "0.9" +pin-project-lite = "0.1" +tokio-postgres = { version = "=0.5.0-alpha.1", default-features = false, path = "../tokio-postgres" } + +[dev-dependencies] +tokio = "=0.2.0-alpha.6" +tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres" } + diff --git a/tokio-postgres-binary-copy/src/lib.rs b/tokio-postgres-binary-copy/src/lib.rs new file mode 100644 index 000000000..9e0677d2c --- /dev/null +++ b/tokio-postgres-binary-copy/src/lib.rs @@ -0,0 +1,123 @@ +use bytes::{BigEndian, BufMut, ByteOrder, Bytes, BytesMut}; +use futures::{future, Stream}; +use parking_lot::Mutex; +use pin_project_lite::pin_project; +use std::convert::TryFrom; +use std::error::Error; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, 
Poll}; +use tokio_postgres::types::{IsNull, ToSql, Type}; + +#[cfg(test)] +mod test; + +const BLOCK_SIZE: usize = 4096; + +pin_project! { + pub struct BinaryCopyStream { + #[pin] + future: F, + buf: Arc>, + done: bool, + } +} + +impl BinaryCopyStream +where + F: Future>>, +{ + pub fn new(types: &[Type], write_values: M) -> BinaryCopyStream + where + M: FnOnce(BinaryCopyWriter) -> F, + { + let mut buf = BytesMut::new(); + buf.reserve(11 + 4 + 4); + buf.put_slice(b"PGCOPY\n\xff\r\n\0"); // magic + buf.put_i32_be(0); // flags + buf.put_i32_be(0); // header extension + + let buf = Arc::new(Mutex::new(buf)); + let writer = BinaryCopyWriter { + buf: buf.clone(), + types: types.to_vec(), + idx: 0, + }; + + BinaryCopyStream { + future: write_values(writer), + buf, + done: false, + } + } +} + +impl Stream for BinaryCopyStream +where + F: Future>>, +{ + type Item = Result>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + + if *this.done { + return Poll::Ready(None); + } + + *this.done = this.future.poll(cx)?.is_ready(); + + let mut buf = this.buf.lock(); + if *this.done { + buf.reserve(2); + buf.put_i16_be(-1); + Poll::Ready(Some(Ok(buf.take().freeze()))) + } else if buf.len() > BLOCK_SIZE { + Poll::Ready(Some(Ok(buf.take().freeze()))) + } else { + Poll::Pending + } + } +} + +// FIXME this should really just take a reference to the buffer, but that requires HKT :( +pub struct BinaryCopyWriter { + buf: Arc>, + types: Vec, + idx: usize, +} + +impl BinaryCopyWriter { + pub async fn write( + &mut self, + value: &(dyn ToSql + Send), + ) -> Result<(), Box> { + future::poll_fn(|_| { + if self.buf.lock().len() > BLOCK_SIZE { + Poll::Pending + } else { + Poll::Ready(()) + } + }) + .await; + + let mut buf = self.buf.lock(); + if self.idx == 0 { + buf.reserve(2); + buf.put_i16_be(self.types.len() as i16); + } + let idx = buf.len(); + buf.reserve(4); + buf.put_i32_be(0); + let len = match value.to_sql_checked(&self.types[self.idx], &mut buf)? 
{ + IsNull::Yes => -1, + IsNull::No => i32::try_from(buf.len() - idx - 4)?, + }; + BigEndian::write_i32(&mut buf[idx..], len); + + self.idx = (self.idx + 1) % self.types.len(); + + Ok(()) + } +} diff --git a/tokio-postgres-binary-copy/src/test.rs b/tokio-postgres-binary-copy/src/test.rs new file mode 100644 index 000000000..dcf91b696 --- /dev/null +++ b/tokio-postgres-binary-copy/src/test.rs @@ -0,0 +1,101 @@ +use crate::BinaryCopyStream; +use tokio_postgres::types::Type; +use tokio_postgres::{Client, NoTls}; + +async fn connect() -> Client { + let (client, connection) = + tokio_postgres::connect("host=localhost port=5433 user=postgres", NoTls) + .await + .unwrap(); + tokio::spawn(async { + connection.await.unwrap(); + }); + client +} + +#[tokio::test] +async fn write_basic() { + let client = connect().await; + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar TEXT)") + .await + .unwrap(); + + let stream = BinaryCopyStream::new(&[Type::INT4, Type::TEXT], |mut w| { + async move { + w.write(&1i32).await?; + w.write(&"foobar").await?; + + w.write(&2i32).await?; + w.write(&None::<&str>).await?; + + Ok(()) + } + }); + + client + .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream) + .await + .unwrap(); + + let rows = client.query("SELECT id, bar FROM foo ORDER BY id", &[]).await.unwrap(); + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[0].get::<_, Option<&str>>(1), Some("foobar")); + assert_eq!(rows[1].get::<_, i32>(0), 2); + assert_eq!(rows[1].get::<_, Option<&str>>(1), None); +} + +#[tokio::test] +async fn write_many_rows() { + let client = connect().await; + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar TEXT)") + .await + .unwrap(); + + let stream = BinaryCopyStream::new(&[Type::INT4, Type::TEXT], |mut w| async move { + for i in 0..10_000i32 { + w.write(&i).await?; + w.write(&format!("the value for {}", i)).await?; + } + + Ok(()) + }); + + client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream).await.unwrap(); + + let rows = client.query("SELECT id, bar FROM foo ORDER BY id", &[]).await.unwrap(); + for (i, row) in rows.iter().enumerate() { + assert_eq!(row.get::<_, i32>(0), i as i32); + assert_eq!(row.get::<_, &str>(1), format!("the value for {}", i)); + } +} + +#[tokio::test] +async fn write_big_rows() { + let client = connect().await; + + client.batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar BYTEA)").await.unwrap(); + + let stream = BinaryCopyStream::new(&[Type::INT4, Type::BYTEA], |mut w| { + async move { + for i in 0..2i32 { + w.write(&i).await.unwrap(); + w.write(&vec![i as u8; 128 * 1024]).await.unwrap(); + } + + Ok(()) + } + }); + + client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream).await.unwrap(); + + let rows = client.query("SELECT id, bar FROM foo ORDER BY id", &[]).await.unwrap(); + for (i, row) in rows.iter().enumerate() { + assert_eq!(row.get::<_, i32>(0), i as i32); + assert_eq!(row.get::<_, &[u8]>(1), &*vec![i as u8; 128 * 1024]); + } +} From 6e2435eb6063b3dd14f098dbc49890414c705746 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 18 Nov 2019 18:06:03 -0800 Subject: [PATCH 298/819] Write full rows in binary copy --- tokio-postgres-binary-copy/src/lib.rs | 46 ++++++++++++++-------- tokio-postgres-binary-copy/src/test.rs | 53 +++++++++++++++++--------- 2 files changed, 64 insertions(+), 35 deletions(-) diff --git a/tokio-postgres-binary-copy/src/lib.rs b/tokio-postgres-binary-copy/src/lib.rs index 9e0677d2c..fa42a84a8 100644 --- 
a/tokio-postgres-binary-copy/src/lib.rs +++ b/tokio-postgres-binary-copy/src/lib.rs @@ -42,7 +42,6 @@ where let writer = BinaryCopyWriter { buf: buf.clone(), types: types.to_vec(), - idx: 0, }; BinaryCopyStream { @@ -85,14 +84,29 @@ where pub struct BinaryCopyWriter { buf: Arc>, types: Vec, - idx: usize, } impl BinaryCopyWriter { pub async fn write( &mut self, - value: &(dyn ToSql + Send), + values: &[&(dyn ToSql + Send)], ) -> Result<(), Box> { + self.write_raw(values.iter().cloned()).await + } + + pub async fn write_raw<'a, I>(&mut self, values: I) -> Result<(), Box> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let values = values.into_iter(); + assert!( + values.len() == self.types.len(), + "expected {} values but got {}", + self.types.len(), + values.len(), + ); + future::poll_fn(|_| { if self.buf.lock().len() > BLOCK_SIZE { Poll::Pending @@ -103,20 +117,20 @@ impl BinaryCopyWriter { .await; let mut buf = self.buf.lock(); - if self.idx == 0 { - buf.reserve(2); - buf.put_i16_be(self.types.len() as i16); - } - let idx = buf.len(); - buf.reserve(4); - buf.put_i32_be(0); - let len = match value.to_sql_checked(&self.types[self.idx], &mut buf)? { - IsNull::Yes => -1, - IsNull::No => i32::try_from(buf.len() - idx - 4)?, - }; - BigEndian::write_i32(&mut buf[idx..], len); - self.idx = (self.idx + 1) % self.types.len(); + buf.reserve(2); + buf.put_i16_be(self.types.len() as i16); + + for (value, type_) in values.zip(&self.types) { + let idx = buf.len(); + buf.reserve(4); + buf.put_i32_be(0); + let len = match value.to_sql_checked(type_, &mut buf)? { + IsNull::Yes => -1, + IsNull::No => i32::try_from(buf.len() - idx - 4)?, + }; + BigEndian::write_i32(&mut buf[idx..], len); + } Ok(()) } diff --git a/tokio-postgres-binary-copy/src/test.rs b/tokio-postgres-binary-copy/src/test.rs index dcf91b696..486ac5816 100644 --- a/tokio-postgres-binary-copy/src/test.rs +++ b/tokio-postgres-binary-copy/src/test.rs @@ -24,11 +24,8 @@ async fn write_basic() { let stream = BinaryCopyStream::new(&[Type::INT4, Type::TEXT], |mut w| { async move { - w.write(&1i32).await?; - w.write(&"foobar").await?; - - w.write(&2i32).await?; - w.write(&None::<&str>).await?; + w.write(&[&1i32, &"foobar"]).await?; + w.write(&[&2i32, &None::<&str>]).await?; Ok(()) } @@ -39,7 +36,10 @@ async fn write_basic() { .await .unwrap(); - let rows = client.query("SELECT id, bar FROM foo ORDER BY id", &[]).await.unwrap(); + let rows = client + .query("SELECT id, bar FROM foo ORDER BY id", &[]) + .await + .unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); assert_eq!(rows[0].get::<_, Option<&str>>(1), Some("foobar")); @@ -56,18 +56,25 @@ async fn write_many_rows() { .await .unwrap(); - let stream = BinaryCopyStream::new(&[Type::INT4, Type::TEXT], |mut w| async move { - for i in 0..10_000i32 { - w.write(&i).await?; - w.write(&format!("the value for {}", i)).await?; - } + let stream = BinaryCopyStream::new(&[Type::INT4, Type::TEXT], |mut w| { + async move { + for i in 0..10_000i32 { + w.write(&[&i, &format!("the value for {}", i)]).await?; + } - Ok(()) + Ok(()) + } }); - client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream).await.unwrap(); + client + .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream) + .await + .unwrap(); - let rows = client.query("SELECT id, bar FROM foo ORDER BY id", &[]).await.unwrap(); + let rows = client + .query("SELECT id, bar FROM foo ORDER BY id", &[]) + .await + .unwrap(); for (i, row) in rows.iter().enumerate() { assert_eq!(row.get::<_, i32>(0), i as 
i32); assert_eq!(row.get::<_, &str>(1), format!("the value for {}", i)); @@ -78,22 +85,30 @@ async fn write_many_rows() { async fn write_big_rows() { let client = connect().await; - client.batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar BYTEA)").await.unwrap(); + client + .batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar BYTEA)") + .await + .unwrap(); let stream = BinaryCopyStream::new(&[Type::INT4, Type::BYTEA], |mut w| { async move { for i in 0..2i32 { - w.write(&i).await.unwrap(); - w.write(&vec![i as u8; 128 * 1024]).await.unwrap(); + w.write(&[&i, &vec![i as u8; 128 * 1024]]).await?; } Ok(()) } }); - client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream).await.unwrap(); + client + .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream) + .await + .unwrap(); - let rows = client.query("SELECT id, bar FROM foo ORDER BY id", &[]).await.unwrap(); + let rows = client + .query("SELECT id, bar FROM foo ORDER BY id", &[]) + .await + .unwrap(); for (i, row) in rows.iter().enumerate() { assert_eq!(row.get::<_, i32>(0), i as i32); assert_eq!(row.get::<_, &[u8]>(1), &*vec![i as u8; 128 * 1024]); From c98f605622634dc2fc78c0ae750be158eab08eef Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 18 Nov 2019 18:12:34 -0800 Subject: [PATCH 299/819] Switch to pin-project-lite --- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/query.rs | 17 +++++++++-------- tokio-postgres/src/simple_query.rs | 17 +++++++++-------- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f255cab45..73e05ca3f 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -42,7 +42,7 @@ futures-preview = { version = "=0.3.0-alpha.19", features = ["async-await"] } log = "0.4" parking_lot = "0.9" percent-encoding = "2.0" -pin-project = "0.4" +pin-project-lite = "0.1" phf = "0.8" postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } postgres-types = { version = "=0.1.0-alpha.1", path = "../postgres-types" } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 5a1b7b491..02415449c 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -5,7 +5,7 @@ use crate::types::{IsNull, ToSql}; use crate::{Error, Portal, Row, Statement}; use bytes::{Bytes, BytesMut}; use futures::{ready, Stream}; -use pin_project::pin_project; +use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::marker::PhantomPinned; @@ -149,13 +149,14 @@ where } } -/// A stream of table rows. -#[pin_project] -pub struct RowStream { - statement: Statement, - responses: Responses, - #[pin] - _p: PhantomPinned, +pin_project! { + /// A stream of table rows. 
+ pub struct RowStream { + statement: Statement, + responses: Responses, + #[pin] + _p: PhantomPinned, + } } impl Stream for RowStream { diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 018583741..c78000ffa 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -5,7 +5,7 @@ use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use bytes::Bytes; use fallible_iterator::FallibleIterator; use futures::{ready, Stream}; -use pin_project::pin_project; +use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::marker::PhantomPinned; @@ -47,13 +47,14 @@ fn encode(client: &InnerClient, query: &str) -> Result { }) } -/// A stream of simple query results. -#[pin_project] -pub struct SimpleQueryStream { - responses: Responses, - columns: Option>, - #[pin] - _p: PhantomPinned, +pin_project! { + /// A stream of simple query results. + pub struct SimpleQueryStream { + responses: Responses, + columns: Option>, + #[pin] + _p: PhantomPinned, + } } impl Stream for SimpleQueryStream { From bccfa83acad728b7c09e294f9cf8610041cb289f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 18 Nov 2019 18:39:36 -0800 Subject: [PATCH 300/819] Expose CopyOut --- tokio-postgres/src/copy_out.rs | 1 + tokio-postgres/src/lib.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index f5d52429d..999afac8d 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -39,6 +39,7 @@ async fn start(client: &InnerClient, buf: Bytes) -> Result { Ok(responses) } +/// A stream of `COPY ... TO STDOUT` query data. pub struct CopyStream { responses: Responses, } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index df531af02..f5636521b 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -114,6 +114,7 @@ pub use crate::simple_query::SimpleQueryStream; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::statement::{Column, Statement}; +pub use crate::copy_out::CopyStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; From 54232439232e1166dda5101a7df0c732164c1f8a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 19 Nov 2019 04:29:31 -0800 Subject: [PATCH 301/819] Make CopyStream !Unpin --- tokio-postgres/src/copy_out.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 999afac8d..8bfd28190 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -1,4 +1,5 @@ use crate::client::{InnerClient, Responses}; +use pin_project_lite::pin_project; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::ToSql; @@ -8,6 +9,7 @@ use futures::{ready, Stream}; use postgres_protocol::message::backend::Message; use std::pin::Pin; use std::task::{Context, Poll}; +use std::marker::PhantomPinned; pub async fn copy_out<'a, I>( client: &InnerClient, @@ -20,7 +22,7 @@ where { let buf = query::encode(client, &statement, params)?; let responses = start(client, buf).await?; - Ok(CopyStream { responses }) + Ok(CopyStream { responses, _p: PhantomPinned }) } async fn start(client: &InnerClient, buf: Bytes) -> Result { @@ -39,16 +41,22 @@ async fn start(client: &InnerClient, buf: Bytes) -> Result { Ok(responses) } -/// A stream of `COPY ... 
TO STDOUT` query data. -pub struct CopyStream { - responses: Responses, +pin_project! { + /// A stream of `COPY ... TO STDOUT` query data. + pub struct CopyStream { + responses: Responses, + #[pin] + _p: PhantomPinned, + } } impl Stream for CopyStream { type Item = Result; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match ready!(self.responses.poll_next(cx)?) { + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + + match ready!(this.responses.poll_next(cx)?) { Message::CopyData(body) => Poll::Ready(Some(Ok(body.into_bytes()))), Message::CopyDone => Poll::Ready(None), _ => Poll::Ready(Some(Err(Error::unexpected_message()))), From 4a3df873e906253f5d39aef805daca2b9103a9fe Mon Sep 17 00:00:00 2001 From: Brandon W Maister Date: Tue, 19 Nov 2019 10:23:58 -0500 Subject: [PATCH 302/819] Make all invalid message length errors unique --- postgres-protocol/src/message/backend.rs | 18 +++++++++--------- postgres-protocol/src/types/mod.rs | 18 +++++++++--------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 0cf1c4f0c..5de7908ab 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -53,7 +53,7 @@ impl Header { if len < 4 { return Err(io::Error::new( io::ErrorKind::InvalidData, - "invalid message length", + "invalid message length: header length < 4", )); } @@ -123,7 +123,7 @@ impl Message { if len < 4 { return Err(io::Error::new( io::ErrorKind::InvalidInput, - "invalid message length", + "invalid message length: parsing u32", )); } @@ -271,7 +271,7 @@ impl Message { if !buf.is_empty() { return Err(io::Error::new( io::ErrorKind::InvalidInput, - "invalid message length", + "invalid message length: expected buffer to be empty", )); } @@ -376,7 +376,7 @@ impl<'a> FallibleIterator for SaslMechanisms<'a> { if self.0.len() != 1 { return Err(io::Error::new( io::ErrorKind::InvalidData, - "invalid message length", + "invalid message length: expected to be at end of iterator for sasl", )); } Ok(None) @@ -488,7 +488,7 @@ impl<'a> FallibleIterator for ColumnFormats<'a> { } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, - "invalid message length", + "invalid message length: wrong column formats", )); } } @@ -564,7 +564,7 @@ impl<'a> FallibleIterator for DataRowRanges<'a> { } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, - "invalid message length", + "invalid message length: datarowrange is not empty", )); } } @@ -622,7 +622,7 @@ impl<'a> FallibleIterator for ErrorFields<'a> { } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, - "invalid message length", + "invalid message length: error fields is not drained", )); } } @@ -718,7 +718,7 @@ impl<'a> FallibleIterator for Parameters<'a> { } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, - "invalid message length", + "invalid message length: parameters is not drained", )); } } @@ -794,7 +794,7 @@ impl<'a> FallibleIterator for Fields<'a> { } else { return Err(io::Error::new( io::ErrorKind::InvalidInput, - "invalid message length", + "invalid message length: field is not drained", )); } } diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 8af7486e7..8412f31d7 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -308,11 +308,11 @@ pub fn varbit_from_sql<'a>( ) -> Result, StdBox> { let len = 
buf.read_i32::()?; if len < 0 { - return Err("invalid varbit length".into()); + return Err("invalid varbit length: varbit < 0".into()); } let bytes = (len as usize + 7) / 8; if buf.len() != bytes { - return Err("invalid message length".into()); + return Err("invalid message length: varbit mismatch".into()); } Ok(Varbit { @@ -362,7 +362,7 @@ pub fn timestamp_to_sql(v: i64, buf: &mut BytesMut) { pub fn timestamp_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i64::()?; if !buf.is_empty() { - return Err("invalid message length".into()); + return Err("invalid message length: timestamp not drained".into()); } Ok(v) } @@ -382,7 +382,7 @@ pub fn date_to_sql(v: i32, buf: &mut BytesMut) { pub fn date_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i32::()?; if !buf.is_empty() { - return Err("invalid message length".into()); + return Err("invalid message length: date not drained".into()); } Ok(v) } @@ -402,7 +402,7 @@ pub fn time_to_sql(v: i64, buf: &mut BytesMut) { pub fn time_from_sql(mut buf: &[u8]) -> Result> { let v = buf.read_i64::()?; if !buf.is_empty() { - return Err("invalid message length".into()); + return Err("invalid message length: time not drained".into()); } Ok(v) } @@ -417,7 +417,7 @@ pub fn macaddr_to_sql(v: [u8; 6], buf: &mut BytesMut) { #[inline] pub fn macaddr_from_sql(buf: &[u8]) -> Result<[u8; 6], StdBox> { if buf.len() != 6 { - return Err("invalid message length".into()); + return Err("invalid message length: macaddr length mismatch".into()); } let mut out = [0; 6]; out.copy_from_slice(buf); @@ -434,7 +434,7 @@ pub fn uuid_to_sql(v: [u8; 16], buf: &mut BytesMut) { #[inline] pub fn uuid_from_sql(buf: &[u8]) -> Result<[u8; 16], StdBox> { if buf.len() != 16 { - return Err("invalid message length".into()); + return Err("invalid message length: uuid size mismatch".into()); } let mut out = [0; 16]; out.copy_from_slice(buf); @@ -615,7 +615,7 @@ impl<'a> FallibleIterator for ArrayValues<'a> { fn next(&mut self) -> Result>, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { - return Err("invalid message length".into()); + return Err("invalid message length: arrayvalue not drained".into()); } return Ok(None); } @@ -944,7 +944,7 @@ impl<'a> FallibleIterator for PathPoints<'a> { fn next(&mut self) -> Result, StdBox> { if self.remaining == 0 { if !self.buf.is_empty() { - return Err("invalid message length".into()); + return Err("invalid message length: path points not drained".into()); } return Ok(None); } From 5517719b079dd5fee9d11e6354059fc5537e635b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 22 Nov 2019 18:37:27 -0800 Subject: [PATCH 303/819] Binary copy out support --- tokio-postgres-binary-copy/Cargo.toml | 1 - tokio-postgres-binary-copy/src/lib.rs | 163 ++++++++++++++++++++++--- tokio-postgres-binary-copy/src/test.rs | 78 +++++++++++- 3 files changed, 222 insertions(+), 20 deletions(-) diff --git a/tokio-postgres-binary-copy/Cargo.toml b/tokio-postgres-binary-copy/Cargo.toml index e6a70bafa..7b3f3b7e6 100644 --- a/tokio-postgres-binary-copy/Cargo.toml +++ b/tokio-postgres-binary-copy/Cargo.toml @@ -14,4 +14,3 @@ tokio-postgres = { version = "=0.5.0-alpha.1", default-features = false, path = [dev-dependencies] tokio = "=0.2.0-alpha.6" tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres" } - diff --git a/tokio-postgres-binary-copy/src/lib.rs b/tokio-postgres-binary-copy/src/lib.rs index fa42a84a8..99089da30 100644 --- a/tokio-postgres-binary-copy/src/lib.rs +++ b/tokio-postgres-binary-copy/src/lib.rs @@ -1,22 +1,27 @@ -use 
bytes::{BigEndian, BufMut, ByteOrder, Bytes, BytesMut}; -use futures::{future, Stream}; +use bytes::{BigEndian, BufMut, ByteOrder, Bytes, BytesMut, Buf}; +use futures::{future, ready, Stream}; use parking_lot::Mutex; use pin_project_lite::pin_project; use std::convert::TryFrom; use std::error::Error; use std::future::Future; +use std::ops::Range; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use tokio_postgres::types::{IsNull, ToSql, Type}; +use tokio_postgres::types::{IsNull, ToSql, Type, FromSql, WrongType}; +use tokio_postgres::CopyStream; +use std::io::Cursor; #[cfg(test)] mod test; const BLOCK_SIZE: usize = 4096; +const MAGIC: &[u8] = b"PGCOPY\n\xff\r\n\0"; +const HEADER_LEN: usize = MAGIC.len() + 4 + 4; pin_project! { - pub struct BinaryCopyStream { + pub struct BinaryCopyInStream { #[pin] future: F, buf: Arc>, @@ -24,27 +29,27 @@ pin_project! { } } -impl BinaryCopyStream +impl BinaryCopyInStream where F: Future>>, { - pub fn new(types: &[Type], write_values: M) -> BinaryCopyStream + pub fn new(types: &[Type], write_values: M) -> BinaryCopyInStream where - M: FnOnce(BinaryCopyWriter) -> F, + M: FnOnce(BinaryCopyInWriter) -> F, { let mut buf = BytesMut::new(); - buf.reserve(11 + 4 + 4); - buf.put_slice(b"PGCOPY\n\xff\r\n\0"); // magic + buf.reserve(HEADER_LEN); + buf.put_slice(MAGIC); // magic buf.put_i32_be(0); // flags buf.put_i32_be(0); // header extension let buf = Arc::new(Mutex::new(buf)); - let writer = BinaryCopyWriter { + let writer = BinaryCopyInWriter { buf: buf.clone(), types: types.to_vec(), }; - BinaryCopyStream { + BinaryCopyInStream { future: write_values(writer), buf, done: false, @@ -52,7 +57,7 @@ where } } -impl Stream for BinaryCopyStream +impl Stream for BinaryCopyInStream where F: Future>>, { @@ -81,12 +86,12 @@ where } // FIXME this should really just take a reference to the buffer, but that requires HKT :( -pub struct BinaryCopyWriter { +pub struct BinaryCopyInWriter { buf: Arc>, types: Vec, } -impl BinaryCopyWriter { +impl BinaryCopyInWriter { pub async fn write( &mut self, values: &[&(dyn ToSql + Send)], @@ -119,7 +124,7 @@ impl BinaryCopyWriter { let mut buf = self.buf.lock(); buf.reserve(2); - buf.put_i16_be(self.types.len() as i16); + buf.put_u16_be(self.types.len() as u16); for (value, type_) in values.zip(&self.types) { let idx = buf.len(); @@ -135,3 +140,131 @@ impl BinaryCopyWriter { Ok(()) } } + +struct Header { + has_oids: bool, +} + +pin_project! { + pub struct BinaryCopyOutStream { + #[pin] + stream: CopyStream, + types: Arc>, + header: Option
, + } +} + +impl BinaryCopyOutStream { + pub fn new(types: &[Type], stream: CopyStream) -> BinaryCopyOutStream { + BinaryCopyOutStream { + stream, + types: Arc::new(types.to_vec()), + header: None, + } + } +} + +impl Stream for BinaryCopyOutStream { + type Item = Result>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + + let chunk = match ready!(this.stream.poll_next(cx)) { + Some(Ok(chunk)) => chunk, + Some(Err(e)) => return Poll::Ready(Some(Err(e.into()))), + None => return Poll::Ready(Some(Err("unexpected EOF".into()))), + }; + let mut chunk= Cursor::new(chunk); + + let has_oids = match &this.header { + Some(header) => header.has_oids, + None => { + check_remaining(&chunk, HEADER_LEN)?; + if &chunk.bytes()[..MAGIC.len()] != MAGIC { + return Poll::Ready(Some(Err("invalid magic value".into()))); + } + chunk.advance(MAGIC.len()); + + let flags = chunk.get_i32_be(); + let has_oids = (flags & (1 << 16)) != 0; + + let header_extension = chunk.get_u32_be() as usize; + check_remaining(&chunk, header_extension)?; + chunk.advance(header_extension); + + *this.header = Some(Header { has_oids }); + has_oids + } + }; + + check_remaining(&chunk, 2)?; + let mut len = chunk.get_i16_be(); + if len == -1 { + return Poll::Ready(None); + } + + if has_oids { + len += 1; + } + if len as usize != this.types.len() { + return Poll::Ready(Some(Err("unexpected tuple size".into()))); + } + + let mut ranges = vec![]; + for _ in 0..len { + check_remaining(&chunk, 4)?; + let len = chunk.get_i32_be(); + if len == -1 { + ranges.push(None); + } else { + let len = len as usize; + check_remaining(&chunk, len)?; + let start = chunk.position() as usize; + ranges.push(Some(start..start + len)); + chunk.advance(len); + } + } + + Poll::Ready(Some(Ok(BinaryCopyOutRow { + buf: chunk.into_inner(), + ranges, + types: this.types.clone(), + }))) + } +} + +fn check_remaining(buf: &impl Buf, len: usize) -> Result<(), Box> { + if buf.remaining() < len { + Err("unexpected EOF".into()) + } else { + Ok(()) + } +} + +pub struct BinaryCopyOutRow { + buf: Bytes, + ranges: Vec>>, + types: Arc>, +} + +impl BinaryCopyOutRow { + pub fn try_get<'a, T>(&'a self, idx: usize) -> Result> where T: FromSql<'a> { + let type_ = &self.types[idx]; + if !T::accepts(type_) { + return Err(WrongType::new::(type_.clone()).into()); + } + + match &self.ranges[idx] { + Some(range) => T::from_sql(type_, &self.buf[range.clone()]).map_err(Into::into), + None => T::from_sql_null(type_).map_err(Into::into) + } + } + + pub fn get<'a, T>(&'a self, idx: usize) -> T where T: FromSql<'a> { + match self.try_get(idx) { + Ok(value) => value, + Err(e) => panic!("error retrieving column {}: {}", idx, e), + } + } +} diff --git a/tokio-postgres-binary-copy/src/test.rs b/tokio-postgres-binary-copy/src/test.rs index 486ac5816..f4d193518 100644 --- a/tokio-postgres-binary-copy/src/test.rs +++ b/tokio-postgres-binary-copy/src/test.rs @@ -1,6 +1,7 @@ -use crate::BinaryCopyStream; +use crate::{BinaryCopyInStream, BinaryCopyOutStream}; use tokio_postgres::types::Type; use tokio_postgres::{Client, NoTls}; +use futures::TryStreamExt; async fn connect() -> Client { let (client, connection) = @@ -22,7 +23,7 @@ async fn write_basic() { .await .unwrap(); - let stream = BinaryCopyStream::new(&[Type::INT4, Type::TEXT], |mut w| { + let stream = BinaryCopyInStream::new(&[Type::INT4, Type::TEXT], |mut w| { async move { w.write(&[&1i32, &"foobar"]).await?; w.write(&[&2i32, &None::<&str>]).await?; @@ -56,7 +57,7 @@ async fn write_many_rows() { 
.await .unwrap(); - let stream = BinaryCopyStream::new(&[Type::INT4, Type::TEXT], |mut w| { + let stream = BinaryCopyInStream::new(&[Type::INT4, Type::TEXT], |mut w| { async move { for i in 0..10_000i32 { w.write(&[&i, &format!("the value for {}", i)]).await?; @@ -90,7 +91,7 @@ async fn write_big_rows() { .await .unwrap(); - let stream = BinaryCopyStream::new(&[Type::INT4, Type::BYTEA], |mut w| { + let stream = BinaryCopyInStream::new(&[Type::INT4, Type::BYTEA], |mut w| { async move { for i in 0..2i32 { w.write(&[&i, &vec![i as u8; 128 * 1024]]).await?; @@ -114,3 +115,72 @@ async fn write_big_rows() { assert_eq!(row.get::<_, &[u8]>(1), &*vec![i as u8; 128 * 1024]); } } + +#[tokio::test] +async fn read_basic() { + let client = connect().await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo (id INT, bar TEXT); + INSERT INTO foo (id, bar) VALUES (1, 'foobar'), (2, NULL); + " + ) + .await + .unwrap(); + + let stream = client.copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]).await.unwrap(); + let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream).try_collect::>().await.unwrap(); + assert_eq!(rows.len(), 2); + + assert_eq!(rows[0].get::(0), 1); + assert_eq!(rows[0].get::>(1), Some("foobar")); + assert_eq!(rows[1].get::(0), 2); + assert_eq!(rows[1].get::>(1), None); +} + +#[tokio::test] +async fn read_many_rows() { + let client = connect().await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo (id INT, bar TEXT); + INSERT INTO foo (id, bar) SELECT i, 'the value for ' || i FROM generate_series(0, 9999) i;" + ) + .await + .unwrap(); + + let stream = client.copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]).await.unwrap(); + let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream).try_collect::>().await.unwrap(); + assert_eq!(rows.len(), 10_000); + + for (i, row) in rows.iter().enumerate() { + assert_eq!(row.get::(0), i as i32); + assert_eq!(row.get::<&str>(1), format!("the value for {}", i)); + } +} + +#[tokio::test] +async fn read_big_rows() { + let client = connect().await; + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar BYTEA)") + .await + .unwrap(); + for i in 0..2i32 { + client.execute("INSERT INTO foo (id, bar) VALUES ($1, $2)", &[&i, &vec![i as u8; 128 * 1024]]).await.unwrap(); + } + + let stream = client.copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]).await.unwrap(); + let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::BYTEA], stream).try_collect::>().await.unwrap(); + assert_eq!(rows.len(), 2); + + for (i, row) in rows.iter().enumerate() { + assert_eq!(row.get::(0), i as i32); + assert_eq!(row.get::<&[u8]>(1), &vec![i as u8; 128 * 1024][..]); + } +} From 12c2ef79b6b96bb12ea8cc4937f27b325729dfd2 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 26 Nov 2019 16:32:36 -0800 Subject: [PATCH 304/819] Upgrade to tokio 0.2/futures 0.3 --- postgres-native-tls/Cargo.toml | 7 +- postgres-native-tls/src/lib.rs | 9 ++- postgres-openssl/Cargo.toml | 9 ++- postgres-openssl/src/lib.rs | 9 ++- postgres-protocol/Cargo.toml | 2 +- postgres-protocol/src/lib.rs | 26 +------ postgres-protocol/src/message/backend.rs | 4 +- postgres-protocol/src/message/frontend.rs | 77 +++++++++---------- postgres-protocol/src/types/mod.rs | 94 +++++++++++------------ postgres-types/Cargo.toml | 2 +- postgres-types/src/lib.rs | 26 +------ postgres/Cargo.toml | 8 +- postgres/src/client.rs | 2 +- postgres/src/config.rs | 6 +- postgres/src/copy_in_stream.rs | 9 +-- postgres/src/lib.rs | 4 +- postgres/src/test.rs | 19 ----- 
tokio-postgres-binary-copy/Cargo.toml | 9 ++- tokio-postgres-binary-copy/src/lib.rs | 25 +++--- tokio-postgres/Cargo.toml | 11 +-- tokio-postgres/src/bind.rs | 2 +- tokio-postgres/src/client.rs | 5 +- tokio-postgres/src/codec.rs | 2 +- tokio-postgres/src/connect_raw.rs | 2 +- tokio-postgres/src/connect_socket.rs | 4 +- tokio-postgres/src/connection.rs | 2 +- tokio-postgres/src/copy_in.rs | 14 ++-- tokio-postgres/src/copy_out.rs | 9 ++- tokio-postgres/src/lib.rs | 2 +- tokio-postgres/src/maybe_tls_stream.rs | 3 +- tokio-postgres/src/portal.rs | 2 +- tokio-postgres/src/prepare.rs | 2 +- tokio-postgres/src/query.rs | 4 +- tokio-postgres/src/simple_query.rs | 2 +- tokio-postgres/src/socket.rs | 3 +- tokio-postgres/src/statement.rs | 2 +- tokio-postgres/src/transaction.rs | 7 +- tokio-postgres/tests/test/main.rs | 37 ++++++--- tokio-postgres/tests/test/runtime.rs | 6 +- 39 files changed, 210 insertions(+), 258 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index e9ab95f08..bd71250bb 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -16,12 +16,13 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] +bytes = "0.5" futures-preview = "=0.3.0-alpha.19" native-tls = "0.2" -tokio-io = "=0.2.0-alpha.6" -tokio-tls = "=0.3.0-alpha.6" +tokio = "0.2" +tokio-tls = "0.3" tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.6" +tokio = { version = "0.2", features = ["full"] } postgres = { version = "=0.17.0-alpha.1", path = "../postgres" } diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index add9ea8a8..e10358d48 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -48,16 +48,17 @@ #![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use futures::task::Context; -use futures::Poll; +use std::task::{Context, Poll}; use std::future::Future; use std::io; use std::pin::Pin; -use tokio_io::{AsyncRead, AsyncWrite, Buf, BufMut}; +use tokio::io::{AsyncRead, AsyncWrite}; +use bytes::{Buf, BufMut}; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; +use std::mem::MaybeUninit; #[cfg(test)] mod test; @@ -134,7 +135,7 @@ impl AsyncRead for TlsStream where S: AsyncRead + AsyncWrite + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { self.0.prepare_uninitialized_buffer(buf) } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 1e9787c8e..7d28a4bbf 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -16,12 +16,13 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures-preview = "=0.3.0-alpha.19" +bytes = "0.5" +futures = "0.3" openssl = "0.10" -tokio-io = "=0.2.0-alpha.6" -tokio-openssl = "=0.4.0-alpha.6" +tokio = "0.2" +tokio-openssl = "0.4" tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = "=0.2.0-alpha.6" +tokio = { version = "0.2", features = ["full"] } postgres = { version = "=0.17.0-alpha.1", path = "../postgres" } diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index a6d27d631..3a884ffb3 100644 --- 
a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -42,8 +42,7 @@ #![doc(html_root_url = "https://docs.rs/postgres-openssl/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use futures::task::Context; -use futures::Poll; +use std::task::{Poll, Context}; #[cfg(feature = "runtime")] use openssl::error::ErrorStack; use openssl::hash::MessageDigest; @@ -57,12 +56,14 @@ use std::io; use std::pin::Pin; #[cfg(feature = "runtime")] use std::sync::Arc; -use tokio_io::{AsyncRead, AsyncWrite, Buf, BufMut}; +use tokio::io::{AsyncRead, AsyncWrite}; +use bytes::{Buf, BufMut}; use tokio_openssl::{HandshakeError, SslStream}; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; +use std::mem::MaybeUninit; #[cfg(test)] mod test; @@ -156,7 +157,7 @@ impl AsyncRead for TlsStream where S: AsyncRead + AsyncWrite + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { self.0.prepare_uninitialized_buffer(buf) } diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 13d13c435..baba826c8 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -11,7 +11,7 @@ readme = "../README.md" [dependencies] base64 = "0.10" byteorder = "1.0" -bytes = "0.4" +bytes = "0.5" fallible-iterator = "0.2" generic-array = "0.13" hmac = "0.7" diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index 8a9f90e2f..9ebbcba59 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -31,37 +31,13 @@ pub enum IsNull { No, } -// https://github.com/tokio-rs/bytes/issues/170 -struct B<'a>(&'a mut BytesMut); - -impl<'a> BufMut for B<'a> { - #[inline] - fn remaining_mut(&self) -> usize { - usize::max_value() - self.0.len() - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - self.0.advance_mut(cnt); - } - - #[inline] - unsafe fn bytes_mut(&mut self) -> &mut [u8] { - if !self.0.has_remaining_mut() { - self.0.reserve(64); - } - - self.0.bytes_mut() - } -} - fn write_nullable(serializer: F, buf: &mut BytesMut) -> Result<(), E> where F: FnOnce(&mut BytesMut) -> Result, E: From, { let base = buf.len(); - B(buf).put_i32_be(0); + buf.put_i32(0); let size = match serializer(buf)? 
{ IsNull::No => i32::from_usize(buf.len() - base - 4)?, IsNull::Yes => -1, diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 5de7908ab..7c704511e 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -301,7 +301,7 @@ impl Buffer { Some(pos) => { let start = self.idx; let end = start + pos; - let cstr = self.bytes.slice(start, end); + let cstr = self.bytes.slice(start..end); self.idx = end + 1; Ok(cstr) } @@ -314,7 +314,7 @@ impl Buffer { #[inline] fn read_all(&mut self) -> Bytes { - let buf = self.bytes.slice_from(self.idx); + let buf = self.bytes.slice(self.idx..); self.idx = self.bytes.len(); buf } diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index 697316ce8..8587cd080 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -2,13 +2,13 @@ #![allow(missing_docs)] use byteorder::{BigEndian, ByteOrder}; -use bytes::{Buf, BufMut, BytesMut, IntoBuf}; +use bytes::{Buf, BufMut, BytesMut}; use std::convert::TryFrom; use std::error::Error; use std::io; use std::marker; -use crate::{write_nullable, FromUsize, IsNull, Oid, B}; +use crate::{write_nullable, FromUsize, IsNull, Oid}; #[inline] fn write_body(buf: &mut BytesMut, f: F) -> Result<(), E> @@ -61,7 +61,7 @@ where F: FnMut(T, &mut BytesMut) -> Result>, K: IntoIterator, { - B(buf).put_u8(b'B'); + buf.put_u8(b'B'); write_body(buf, |buf| { write_cstr(portal.as_bytes(), buf)?; @@ -69,7 +69,7 @@ where write_counted( formats, |f, buf| { - B(buf).put_i16_be(f); + buf.put_i16(f); Ok::<_, io::Error>(()) }, buf, @@ -82,7 +82,7 @@ where write_counted( result_formats, |f, buf| { - B(buf).put_i16_be(f); + buf.put_i16(f); Ok::<_, io::Error>(()) }, buf, @@ -115,9 +115,9 @@ where #[inline] pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut BytesMut) { write_body(buf, |buf| { - B(buf).put_i32_be(80_877_102); - B(buf).put_i32_be(process_id); - B(buf).put_i32_be(secret_key); + buf.put_i32(80_877_102); + buf.put_i32(process_id); + buf.put_i32(secret_key); Ok::<_, io::Error>(()) }) .unwrap(); @@ -125,9 +125,9 @@ pub fn cancel_request(process_id: i32, secret_key: i32, buf: &mut BytesMut) { #[inline] pub fn close(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> { - B(buf).put_u8(b'C'); + buf.put_u8(b'C'); write_body(buf, |buf| { - B(buf).put_u8(variant); + buf.put_u8(variant); write_cstr(name.as_bytes(), buf) }) } @@ -141,12 +141,7 @@ impl CopyData where T: Buf, { - pub fn new(buf: U) -> io::Result> - where - U: IntoBuf, - { - let buf = buf.into_buf(); - + pub fn new(buf: T) -> io::Result> { let len = buf .remaining() .checked_add(4) @@ -159,39 +154,39 @@ where } pub fn write(self, out: &mut BytesMut) { - B(out).put_u8(b'd'); - B(out).put_i32_be(self.len); - B(out).put(self.buf); + out.put_u8(b'd'); + out.put_i32(self.len); + out.put(self.buf); } } #[inline] pub fn copy_done(buf: &mut BytesMut) { - B(buf).put_u8(b'c'); + buf.put_u8(b'c'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } #[inline] pub fn copy_fail(message: &str, buf: &mut BytesMut) -> io::Result<()> { - B(buf).put_u8(b'f'); + buf.put_u8(b'f'); write_body(buf, |buf| write_cstr(message.as_bytes(), buf)) } #[inline] pub fn describe(variant: u8, name: &str, buf: &mut BytesMut) -> io::Result<()> { - B(buf).put_u8(b'D'); + buf.put_u8(b'D'); write_body(buf, |buf| { - B(buf).put_u8(variant); + buf.put_u8(variant); write_cstr(name.as_bytes(), buf) }) } #[inline] pub fn 
execute(portal: &str, max_rows: i32, buf: &mut BytesMut) -> io::Result<()> { - B(buf).put_u8(b'E'); + buf.put_u8(b'E'); write_body(buf, |buf| { write_cstr(portal.as_bytes(), buf)?; - B(buf).put_i32_be(max_rows); + buf.put_i32(max_rows); Ok(()) }) } @@ -201,14 +196,14 @@ pub fn parse(name: &str, query: &str, param_types: I, buf: &mut BytesMut) -> where I: IntoIterator, { - B(buf).put_u8(b'P'); + buf.put_u8(b'P'); write_body(buf, |buf| { write_cstr(name.as_bytes(), buf)?; write_cstr(query.as_bytes(), buf)?; write_counted( param_types, |t, buf| { - B(buf).put_u32_be(t); + buf.put_u32(t); Ok::<_, io::Error>(()) }, buf, @@ -219,33 +214,33 @@ where #[inline] pub fn password_message(password: &[u8], buf: &mut BytesMut) -> io::Result<()> { - B(buf).put_u8(b'p'); + buf.put_u8(b'p'); write_body(buf, |buf| write_cstr(password, buf)) } #[inline] pub fn query(query: &str, buf: &mut BytesMut) -> io::Result<()> { - B(buf).put_u8(b'Q'); + buf.put_u8(b'Q'); write_body(buf, |buf| write_cstr(query.as_bytes(), buf)) } #[inline] pub fn sasl_initial_response(mechanism: &str, data: &[u8], buf: &mut BytesMut) -> io::Result<()> { - B(buf).put_u8(b'p'); + buf.put_u8(b'p'); write_body(buf, |buf| { write_cstr(mechanism.as_bytes(), buf)?; let len = i32::from_usize(data.len())?; - B(buf).put_i32_be(len); - B(buf).put_slice(data); + buf.put_i32(len); + buf.put_slice(data); Ok(()) }) } #[inline] pub fn sasl_response(data: &[u8], buf: &mut BytesMut) -> io::Result<()> { - B(buf).put_u8(b'p'); + buf.put_u8(b'p'); write_body(buf, |buf| { - B(buf).put_slice(data); + buf.put_slice(data); Ok(()) }) } @@ -253,7 +248,7 @@ pub fn sasl_response(data: &[u8], buf: &mut BytesMut) -> io::Result<()> { #[inline] pub fn ssl_request(buf: &mut BytesMut) { write_body(buf, |buf| { - B(buf).put_i32_be(80_877_103); + buf.put_i32(80_877_103); Ok::<_, io::Error>(()) }) .unwrap(); @@ -265,25 +260,25 @@ where I: IntoIterator, { write_body(buf, |buf| { - B(buf).put_i32_be(196_608); + buf.put_i32(196_608); for (key, value) in parameters { write_cstr(key.as_bytes(), buf)?; write_cstr(value.as_bytes(), buf)?; } - B(buf).put_u8(0); + buf.put_u8(0); Ok(()) }) } #[inline] pub fn sync(buf: &mut BytesMut) { - B(buf).put_u8(b'S'); + buf.put_u8(b'S'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } #[inline] pub fn terminate(buf: &mut BytesMut) { - B(buf).put_u8(b'X'); + buf.put_u8(b'X'); write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); } @@ -295,7 +290,7 @@ fn write_cstr(s: &[u8], buf: &mut BytesMut) -> Result<(), io::Error> { "string contains embedded null", )); } - B(buf).put_slice(s); - B(buf).put_u8(0); + buf.put_slice(s); + buf.put_u8(0); Ok(()) } diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 8412f31d7..621c01cc2 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -8,7 +8,7 @@ use std::io::Read; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str; -use crate::{write_nullable, FromUsize, IsNull, Oid, B}; +use crate::{write_nullable, FromUsize, IsNull, Oid}; #[cfg(test)] mod test; @@ -25,7 +25,7 @@ const PGSQL_AF_INET6: u8 = 3; /// Serializes a `BOOL` value. #[inline] pub fn bool_to_sql(v: bool, buf: &mut BytesMut) { - B(buf).put_u8(v as u8); + buf.put_u8(v as u8); } /// Deserializes a `BOOL` value. @@ -41,7 +41,7 @@ pub fn bool_from_sql(buf: &[u8]) -> Result /// Serializes a `BYTEA` value. #[inline] pub fn bytea_to_sql(v: &[u8], buf: &mut BytesMut) { - B(buf).put_slice(v); + buf.put_slice(v); } /// Deserializes a `BYTEA value. 
@@ -53,7 +53,7 @@ pub fn bytea_from_sql(buf: &[u8]) -> &[u8] { /// Serializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value. #[inline] pub fn text_to_sql(v: &str, buf: &mut BytesMut) { - B(buf).put_slice(v.as_bytes()); + buf.put_slice(v.as_bytes()); } /// Deserializes a `TEXT`, `VARCHAR`, `CHAR(n)`, `NAME`, or `CITEXT` value. @@ -65,7 +65,7 @@ pub fn text_from_sql(buf: &[u8]) -> Result<&str, StdBox /// Serializes a `"char"` value. #[inline] pub fn char_to_sql(v: i8, buf: &mut BytesMut) { - B(buf).put_i8(v); + buf.put_i8(v); } /// Deserializes a `"char"` value. @@ -81,7 +81,7 @@ pub fn char_from_sql(mut buf: &[u8]) -> Result Result Result Result Result Result)>, { let base = buf.len(); - B(buf).put_i32_be(0); + buf.put_i32(0); let mut count = 0; for (key, value) in values { @@ -196,7 +196,7 @@ where Some(value) => { write_pascal_string(value, buf)?; } - None => B(buf).put_i32_be(-1), + None => buf.put_i32(-1), } } @@ -208,8 +208,8 @@ where fn write_pascal_string(s: &str, buf: &mut BytesMut) -> Result<(), StdBox> { let size = i32::from_usize(s.len())?; - B(buf).put_i32_be(size); - B(buf).put_slice(s.as_bytes()); + buf.put_i32(size); + buf.put_slice(s.as_bytes()); Ok(()) } @@ -292,10 +292,10 @@ where I: Iterator, { let len = i32::from_usize(len)?; - B(buf).put_i32_be(len); + buf.put_i32(len); for byte in v { - B(buf).put_u8(byte); + buf.put_u8(byte); } Ok(()) @@ -352,7 +352,7 @@ impl<'a> Varbit<'a> { /// The value should represent the number of microseconds since midnight, January 1st, 2000. #[inline] pub fn timestamp_to_sql(v: i64, buf: &mut BytesMut) { - B(buf).put_i64_be(v); + buf.put_i64(v); } /// Deserializes a `TIMESTAMP` or `TIMESTAMPTZ` value. @@ -372,7 +372,7 @@ pub fn timestamp_from_sql(mut buf: &[u8]) -> Result Result Result Result<[u8; 6], StdBox Result>, { let dimensions_idx = buf.len(); - B(buf).put_i32_be(0); + buf.put_i32(0); let flags_idx = buf.len(); - B(buf).put_i32_be(0); - B(buf).put_u32_be(element_type); + buf.put_i32(0); + buf.put_u32(element_type); let mut num_dimensions = 0; for dimension in dimensions { num_dimensions += 1; - B(buf).put_i32_be(dimension.len); - B(buf).put_i32_be(dimension.lower_bound); + buf.put_i32(dimension.len); + buf.put_i32(dimension.lower_bound); } let num_dimensions = i32::from_usize(num_dimensions)?; @@ -646,7 +646,7 @@ impl<'a> FallibleIterator for ArrayValues<'a> { /// Serializes an empty range. #[inline] pub fn empty_range_to_sql(buf: &mut BytesMut) { - B(buf).put_u8(RANGE_EMPTY); + buf.put_u8(RANGE_EMPTY); } /// Serializes a range value. @@ -660,7 +660,7 @@ where G: FnOnce(&mut BytesMut) -> Result, StdBox>, { let tag_idx = buf.len(); - B(buf).put_u8(0); + buf.put_u8(0); let mut tag = 0; match write_bound(lower, buf)? { @@ -688,7 +688,7 @@ where F: FnOnce(&mut BytesMut) -> Result, StdBox>, { let base = buf.len(); - B(buf).put_i32_be(0); + buf.put_i32(0); let (null, ret) = match bound(buf)? { RangeBound::Inclusive(null) => (Some(null), RangeBound::Inclusive(())), @@ -784,8 +784,8 @@ pub enum Range<'a> { /// Serializes a point value. #[inline] pub fn point_to_sql(x: f64, y: f64, buf: &mut BytesMut) { - B(buf).put_f64_be(x); - B(buf).put_f64_be(y); + buf.put_f64(x); + buf.put_f64(y); } /// Deserializes a point value. @@ -823,10 +823,10 @@ impl Point { /// Serializes a box value. 
#[inline] pub fn box_to_sql(x1: f64, y1: f64, x2: f64, y2: f64, buf: &mut BytesMut) { - B(buf).put_f64_be(x1); - B(buf).put_f64_be(y1); - B(buf).put_f64_be(x2); - B(buf).put_f64_be(y2); + buf.put_f64(x1); + buf.put_f64(y1); + buf.put_f64(x2); + buf.put_f64(y2); } /// Deserializes a box value. @@ -876,15 +876,15 @@ pub fn path_to_sql( where I: IntoIterator, { - B(buf).put_u8(closed as u8); + buf.put_u8(closed as u8); let points_idx = buf.len(); - B(buf).put_i32_be(0); + buf.put_i32(0); let mut num_points = 0; for (x, y) in points { num_points += 1; - B(buf).put_f64_be(x); - B(buf).put_f64_be(y); + buf.put_f64(x); + buf.put_f64(y); } let num_points = i32::from_usize(num_points)?; @@ -970,17 +970,17 @@ pub fn inet_to_sql(addr: IpAddr, netmask: u8, buf: &mut BytesMut) { IpAddr::V4(_) => PGSQL_AF_INET, IpAddr::V6(_) => PGSQL_AF_INET6, }; - B(buf).put_u8(family); - B(buf).put_u8(netmask); - B(buf).put_u8(0); // is_cidr + buf.put_u8(family); + buf.put_u8(netmask); + buf.put_u8(0); // is_cidr match addr { IpAddr::V4(addr) => { - B(buf).put_u8(4); - B(buf).put_slice(&addr.octets()); + buf.put_u8(4); + buf.put_slice(&addr.octets()); } IpAddr::V6(addr) => { - B(buf).put_u8(16); - B(buf).put_slice(&addr.octets()); + buf.put_u8(16); + buf.put_slice(&addr.octets()); } } } diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 7ce393624..480446c4a 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -21,7 +21,7 @@ with-uuid-0_7 = ["uuid-07"] with-uuid-0_8 = ["uuid-08"] [dependencies] -bytes = "0.4" +bytes = "0.5" fallible-iterator = "0.2" postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } postgres-derive = { version = "=0.4.0-alpha.1", optional = true, path = "../postgres-derive" } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 79144546b..47895a0e4 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -132,7 +132,7 @@ use crate::type_gen::{Inner, Other}; pub use postgres_protocol::Oid; pub use crate::special::{Date, Timestamp}; -use bytes::{BufMut, BytesMut}; +use bytes::BytesMut; // Number of seconds from 1970-01-01 to 2000-01-01 const TIME_SEC_CONVERSION: u64 = 946_684_800; @@ -208,30 +208,6 @@ pub mod private; mod special; mod type_gen; -// https://github.com/tokio-rs/bytes/issues/170 -struct B<'a>(&'a mut BytesMut); - -impl<'a> BufMut for B<'a> { - #[inline] - fn remaining_mut(&self) -> usize { - usize::max_value() - self.0.len() - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - self.0.advance_mut(cnt); - } - - #[inline] - unsafe fn bytes_mut(&mut self) -> &mut [u8] { - if !self.0.has_remaining_mut() { - self.0.reserve(64); - } - - self.0.bytes_mut() - } -} - /// A Postgres type. 
#[derive(PartialEq, Eq, Clone, Debug)] pub struct Type(Inner); diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 2e9726e07..9625365bf 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -29,16 +29,16 @@ with-uuid-0_7 = ["tokio-postgres/with-uuid-0_7"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] [dependencies] -bytes = "0.4" +bytes = "0.5" fallible-iterator = "0.2" -futures-preview = "=0.3.0-alpha.19" +futures = "0.3" pin-utils = "=0.1.0-alpha.4" tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } tokio-executor = "=0.2.0-alpha.6" -tokio = { version = "=0.2.0-alpha.6", optional = true } +tokio = { version = "0.2", optional = true, features = ["rt-threaded"] } lazy_static = { version = "1.0", optional = true } log = { version = "0.4", optional = true } [dev-dependencies] -tokio = "=0.2.0-alpha.6" +tokio = "0.2" diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 36caf9820..e680f8c5b 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -299,7 +299,7 @@ impl Client { /// use postgres::{Client, NoTls}; /// use std::io::Read; /// - /// # fn main() -> Result<(), Box> { + /// # fn main() -> Result<(), Box> { /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; /// /// let mut reader = client.copy_out("COPY people TO stdout", &[])?; diff --git a/postgres/src/config.rs b/postgres/src/config.rs index a7702ef88..df7bdbcc9 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -2,7 +2,7 @@ //! //! Requires the `runtime` Cargo feature (enabled by default). -use futures::FutureExt; +use futures::{FutureExt, executor}; use log::error; use std::fmt; use std::path::Path; @@ -274,7 +274,9 @@ impl Config { } None => { let connect = self.config.connect(tls); - RUNTIME.block_on(connect)? + RUNTIME.handle().enter(|| { + executor::block_on(connect) + })? } }; diff --git a/postgres/src/copy_in_stream.rs b/postgres/src/copy_in_stream.rs index 0dc2f0bbb..6bda3e5dd 100644 --- a/postgres/src/copy_in_stream.rs +++ b/postgres/src/copy_in_stream.rs @@ -1,6 +1,5 @@ use futures::Stream; -use std::io; -use std::io::Read; +use std::io::{self, Cursor, Read}; use std::pin::Pin; use std::task::{Context, Poll}; @@ -10,16 +9,16 @@ impl Stream for CopyInStream where R: Read + Unpin, { - type Item = io::Result>; + type Item = io::Result>>; fn poll_next( mut self: Pin<&mut Self>, _: &mut Context<'_>, - ) -> Poll>>> { + ) -> Poll>>>> { let mut buf = vec![]; match self.0.by_ref().take(4096).read_to_end(&mut buf)? { 0 => Poll::Ready(None), - _ => Poll::Ready(Some(Ok(buf))), + _ => Poll::Ready(Some(Ok(Cursor::new(buf)))), } } } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index b8e5a76b8..1cf9e3248 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -92,7 +92,9 @@ mod test; #[cfg(feature = "runtime")] lazy_static! 
{ static ref RUNTIME: Runtime = runtime::Builder::new() - .name_prefix("postgres-") + .thread_name("postgres") + .threaded_scheduler() + .enable_all() .build() .unwrap(); } diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 59953e4e9..f7d84a88d 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,5 +1,4 @@ use std::io::Read; -use tokio::runtime::Runtime; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -223,21 +222,3 @@ fn portal() { assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 3); } - -#[test] -fn custom_executor() { - let runtime = Runtime::new().unwrap(); - let mut config = "host=localhost port=5433 user=postgres" - .parse::() - .unwrap(); - config.executor(runtime.executor()); - - let mut client = config.connect(NoTls).unwrap(); - - let rows = client.query("SELECT $1::TEXT", &[&"hello"]).unwrap(); - assert_eq!(rows.len(), 1); - assert_eq!(rows[0].get::<_, &str>(0), "hello"); - - drop(runtime); - assert!(client.is_closed()); -} diff --git a/tokio-postgres-binary-copy/Cargo.toml b/tokio-postgres-binary-copy/Cargo.toml index 7b3f3b7e6..2832f538b 100644 --- a/tokio-postgres-binary-copy/Cargo.toml +++ b/tokio-postgres-binary-copy/Cargo.toml @@ -5,12 +5,13 @@ authors = ["Steven Fackler "] edition = "2018" [dependencies] -bytes = "0.4" -futures-preview = "=0.3.0-alpha.19" -parking_lot = "0.9" +byteorder = "1.0" +bytes = "0.5" +futures = "0.3" +parking_lot = "0.10" pin-project-lite = "0.1" tokio-postgres = { version = "=0.5.0-alpha.1", default-features = false, path = "../tokio-postgres" } [dev-dependencies] -tokio = "=0.2.0-alpha.6" +tokio = { version = "0.2", features = ["full"] } tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres" } diff --git a/tokio-postgres-binary-copy/src/lib.rs b/tokio-postgres-binary-copy/src/lib.rs index 99089da30..59acf7460 100644 --- a/tokio-postgres-binary-copy/src/lib.rs +++ b/tokio-postgres-binary-copy/src/lib.rs @@ -1,4 +1,4 @@ -use bytes::{BigEndian, BufMut, ByteOrder, Bytes, BytesMut, Buf}; +use bytes::{BufMut, Bytes, BytesMut, Buf}; use futures::{future, ready, Stream}; use parking_lot::Mutex; use pin_project_lite::pin_project; @@ -12,6 +12,7 @@ use std::task::{Context, Poll}; use tokio_postgres::types::{IsNull, ToSql, Type, FromSql, WrongType}; use tokio_postgres::CopyStream; use std::io::Cursor; +use byteorder::{ByteOrder, BigEndian}; #[cfg(test)] mod test; @@ -40,8 +41,8 @@ where let mut buf = BytesMut::new(); buf.reserve(HEADER_LEN); buf.put_slice(MAGIC); // magic - buf.put_i32_be(0); // flags - buf.put_i32_be(0); // header extension + buf.put_i32(0); // flags + buf.put_i32(0); // header extension let buf = Arc::new(Mutex::new(buf)); let writer = BinaryCopyInWriter { @@ -75,10 +76,10 @@ where let mut buf = this.buf.lock(); if *this.done { buf.reserve(2); - buf.put_i16_be(-1); - Poll::Ready(Some(Ok(buf.take().freeze()))) + buf.put_i16(-1); + Poll::Ready(Some(Ok(buf.split().freeze()))) } else if buf.len() > BLOCK_SIZE { - Poll::Ready(Some(Ok(buf.take().freeze()))) + Poll::Ready(Some(Ok(buf.split().freeze()))) } else { Poll::Pending } @@ -124,12 +125,12 @@ impl BinaryCopyInWriter { let mut buf = self.buf.lock(); buf.reserve(2); - buf.put_u16_be(self.types.len() as u16); + buf.put_u16(self.types.len() as u16); for (value, type_) in values.zip(&self.types) { let idx = buf.len(); buf.reserve(4); - buf.put_i32_be(0); + buf.put_i32(0); let len = match value.to_sql_checked(type_, &mut buf)? 
{ IsNull::Yes => -1, IsNull::No => i32::try_from(buf.len() - idx - 4)?, @@ -186,10 +187,10 @@ impl Stream for BinaryCopyOutStream { } chunk.advance(MAGIC.len()); - let flags = chunk.get_i32_be(); + let flags = chunk.get_i32(); let has_oids = (flags & (1 << 16)) != 0; - let header_extension = chunk.get_u32_be() as usize; + let header_extension = chunk.get_u32() as usize; check_remaining(&chunk, header_extension)?; chunk.advance(header_extension); @@ -199,7 +200,7 @@ impl Stream for BinaryCopyOutStream { }; check_remaining(&chunk, 2)?; - let mut len = chunk.get_i16_be(); + let mut len = chunk.get_i16(); if len == -1 { return Poll::Ready(None); } @@ -214,7 +215,7 @@ impl Stream for BinaryCopyOutStream { let mut ranges = vec![]; for _ in 0..len { check_remaining(&chunk, 4)?; - let len = chunk.get_i32_be(); + let len = chunk.get_i32(); if len == -1 { ranges.push(None); } else { diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 73e05ca3f..bf6d543c0 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -25,7 +25,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio/rt-full", "tokio/tcp", "tokio/uds"] +runtime = ["tokio/dns", "tokio/net", "tokio/time"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] @@ -36,9 +36,9 @@ with-uuid-0_7 = ["postgres-types/with-uuid-0_7"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] [dependencies] -bytes = "0.4" +bytes = "0.5" fallible-iterator = "0.2" -futures-preview = { version = "=0.3.0-alpha.19", features = ["async-await"] } +futures = "0.3" log = "0.4" parking_lot = "0.9" percent-encoding = "2.0" @@ -46,10 +46,11 @@ pin-project-lite = "0.1" phf = "0.8" postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } postgres-types = { version = "=0.1.0-alpha.1", path = "../postgres-types" } -tokio = { version = "=0.2.0-alpha.6", default-features = false, features = ["io", "codec"] } +tokio = { version = "0.2", features = ["io-util"] } +tokio-util = { version = "0.2", features = ["codec"] } [dev-dependencies] -tokio = "=0.2.0-alpha.6" +tokio = { version = "0.2", features = ["full"] } env_logger = "0.7" criterion = "0.3" diff --git a/tokio-postgres/src/bind.rs b/tokio-postgres/src/bind.rs index eb9d1ba8d..69823a9ab 100644 --- a/tokio-postgres/src/bind.rs +++ b/tokio-postgres/src/bind.rs @@ -23,7 +23,7 @@ where let buf = client.with_buf(|buf| { query::encode_bind(&statement, params, &name, buf)?; frontend::sync(buf); - Ok(buf.take().freeze()) + Ok(buf.split().freeze()) })?; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 34d11b5fb..4d7c20536 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -18,7 +18,7 @@ use crate::{cancel_query_raw, copy_in, copy_out, query, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; -use bytes::{BytesMut, IntoBuf}; +use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::{future, pin_mut, ready, StreamExt, TryStream, TryStreamExt}; @@ -357,8 +357,7 @@ impl Client { where T: ?Sized + ToStatement, S: TryStream, - S::Ok: IntoBuf, - ::Buf: 'static + Send, + S::Ok: Buf + 'static + Send, S::Error: Into>, { let statement = statement.__convert().into_statement(self).await?; diff --git 
a/tokio-postgres/src/codec.rs b/tokio-postgres/src/codec.rs index f9f216bd9..2fae8bc17 100644 --- a/tokio-postgres/src/codec.rs +++ b/tokio-postgres/src/codec.rs @@ -3,7 +3,7 @@ use fallible_iterator::FallibleIterator; use postgres_protocol::message::backend; use postgres_protocol::message::frontend::CopyData; use std::io; -use tokio::codec::{Decoder, Encoder}; +use tokio_util::codec::{Decoder, Encoder}; pub enum FrontendMessage { Raw(Bytes), diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index b6a8d8d6e..f9149dfce 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -17,8 +17,8 @@ use std::collections::HashMap; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::codec::Framed; use tokio::io::{AsyncRead, AsyncWrite}; +use tokio_util::codec::Framed; pub struct StartupStream { inner: Framed, PostgresCodec>, diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 7bd858dd9..2d56a2ed5 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -6,7 +6,7 @@ use std::time::Duration; use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; -use tokio::timer::Timeout; +use tokio::time; pub(crate) async fn connect_socket( host: &Host, @@ -42,7 +42,7 @@ where F: Future>, { match timeout { - Some(timeout) => match Timeout::new(connect, timeout).await { + Some(timeout) => match time::timeout(timeout, connect).await { Ok(Ok(socket)) => Ok(socket), Ok(Err(e)) => Err(Error::connect(e)), Err(_) => Err(Error::connect(io::Error::new( diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index 841f16d08..5b0144286 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -15,8 +15,8 @@ use std::collections::{HashMap, VecDeque}; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::codec::Framed; use tokio::io::{AsyncRead, AsyncWrite}; +use tokio_util::codec::Framed; pub enum RequestMessages { Single(FrontendMessage), diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index f4ba95cb7..ebacb6cf1 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -3,7 +3,8 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::ToSql; use crate::{query, Error, Statement}; -use bytes::{Buf, BufMut, BytesMut, IntoBuf}; +use bytes::buf::BufExt; +use bytes::{Buf, BufMut, BytesMut}; use futures::channel::mpsc; use futures::{pin_mut, ready, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; use postgres_protocol::message::backend::Message; @@ -70,8 +71,7 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, S: TryStream, - S::Ok: IntoBuf, - ::Buf: 'static + Send, + S::Ok: Buf + 'static + Send, S::Error: Into>, { let buf = query::encode(client, &statement, params)?; @@ -100,19 +100,17 @@ where pin_mut!(stream); while let Some(buf) = stream.try_next().await.map_err(Error::copy_in_stream)? 
{ - let buf = buf.into_buf(); - let data: Box = if buf.remaining() > 4096 { if bytes.is_empty() { Box::new(buf) } else { - Box::new(bytes.take().freeze().into_buf().chain(buf)) + Box::new(bytes.split().freeze().chain(buf)) } } else { bytes.reserve(buf.remaining()); bytes.put(buf); if bytes.len() > 4096 { - Box::new(bytes.take().freeze().into_buf()) + Box::new(bytes.split().freeze()) } else { continue; } @@ -126,7 +124,7 @@ where } if !bytes.is_empty() { - let data: Box = Box::new(bytes.freeze().into_buf()); + let data: Box = Box::new(bytes.freeze()); let data = CopyData::new(data).map_err(Error::encode)?; sender .send(CopyInMessage::Message(FrontendMessage::CopyData(data))) diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 8bfd28190..0c032208e 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -1,15 +1,15 @@ use crate::client::{InnerClient, Responses}; -use pin_project_lite::pin_project; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::ToSql; use crate::{query, Error, Statement}; use bytes::Bytes; use futures::{ready, Stream}; +use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; +use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; -use std::marker::PhantomPinned; pub async fn copy_out<'a, I>( client: &InnerClient, @@ -22,7 +22,10 @@ where { let buf = query::encode(client, &statement, params)?; let responses = start(client, buf).await?; - Ok(CopyStream { responses, _p: PhantomPinned }) + Ok(CopyStream { + responses, + _p: PhantomPinned, + }) } async fn start(client: &InnerClient, buf: Bytes) -> Result { diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index f5636521b..dc88389fb 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -105,6 +105,7 @@ pub use crate::client::Client; pub use crate::config::Config; pub use crate::connection::Connection; +pub use crate::copy_out::CopyStream; use crate::error::DbError; pub use crate::error::Error; pub use crate::portal::Portal; @@ -114,7 +115,6 @@ pub use crate::simple_query::SimpleQueryStream; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::statement::{Column, Statement}; -pub use crate::copy_out::CopyStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; diff --git a/tokio-postgres/src/maybe_tls_stream.rs b/tokio-postgres/src/maybe_tls_stream.rs index a8f0d3a6b..652236ee8 100644 --- a/tokio-postgres/src/maybe_tls_stream.rs +++ b/tokio-postgres/src/maybe_tls_stream.rs @@ -1,6 +1,7 @@ use crate::tls::{ChannelBinding, TlsStream}; use bytes::{Buf, BufMut}; use std::io; +use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; @@ -15,7 +16,7 @@ where S: AsyncRead + Unpin, T: AsyncRead + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { match self { MaybeTlsStream::Raw(s) => s.prepare_uninitialized_buffer(buf), MaybeTlsStream::Tls(s) => s.prepare_uninitialized_buffer(buf), diff --git a/tokio-postgres/src/portal.rs b/tokio-postgres/src/portal.rs index ba8ab82a1..464d175da 100644 --- a/tokio-postgres/src/portal.rs +++ b/tokio-postgres/src/portal.rs @@ -17,7 +17,7 @@ impl Drop for Inner { let buf = client.with_buf(|buf| { frontend::close(b'P', &self.name, buf).unwrap(); frontend::sync(buf); - 
buf.take().freeze() + buf.split().freeze() }); let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf))); } diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 450d3b0bd..57927b8f4 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -115,7 +115,7 @@ fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Resu frontend::parse(name, query, types.iter().map(Type::oid), buf).map_err(Error::encode)?; frontend::describe(b'S', &name, buf).map_err(Error::encode)?; frontend::sync(buf); - Ok(buf.take().freeze()) + Ok(buf.split().freeze()) }) } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 02415449c..76d14e5cc 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -38,7 +38,7 @@ pub async fn query_portal( let buf = client.with_buf(|buf| { frontend::execute(portal.name(), max_rows, buf).map_err(Error::encode)?; frontend::sync(buf); - Ok(buf.take().freeze()) + Ok(buf.split().freeze()) })?; let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; @@ -102,7 +102,7 @@ where encode_bind(statement, params, "", buf)?; frontend::execute("", 0, buf).map_err(Error::encode)?; frontend::sync(buf); - Ok(buf.take().freeze()) + Ok(buf.split().freeze()) }) } diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index c78000ffa..04899fb9a 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -43,7 +43,7 @@ pub async fn batch_execute(client: &InnerClient, query: &str) -> Result<(), Erro fn encode(client: &InnerClient, query: &str) -> Result { client.with_buf(|buf| { frontend::query(query, buf).map_err(Error::encode)?; - Ok(buf.take().freeze()) + Ok(buf.split().freeze()) }) } diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs index d6e1b84d9..cc7149674 100644 --- a/tokio-postgres/src/socket.rs +++ b/tokio-postgres/src/socket.rs @@ -1,5 +1,6 @@ use bytes::{Buf, BufMut}; use std::io; +use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; @@ -32,7 +33,7 @@ impl Socket { } impl AsyncRead for Socket { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { match &self.0 { Inner::Tcp(s) => s.prepare_uninitialized_buffer(buf), #[cfg(unix)] diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index 6c7ce6794..d8fa1911f 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -18,7 +18,7 @@ impl Drop for StatementInner { let buf = client.with_buf(|buf| { frontend::close(b'S', &self.name, buf).unwrap(); frontend::sync(buf); - buf.take().freeze() + buf.split().freeze() }); let _ = client.send(RequestMessages::Single(FrontendMessage::Raw(buf))); } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index de95862d4..81c5d4602 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -11,7 +11,7 @@ use crate::Socket; use crate::{ bind, query, slice_iter, Client, Error, Portal, Row, SimpleQueryMessage, Statement, ToStatement, }; -use bytes::IntoBuf; +use bytes::Buf; use futures::{TryStream, TryStreamExt}; use postgres_protocol::message::frontend; use std::error; @@ -40,7 +40,7 @@ impl<'a> Drop for Transaction<'a> { }; let buf = self.client.inner().with_buf(|buf| { frontend::query(&query, 
buf).unwrap(); - buf.take().freeze() + buf.split().freeze() }); let _ = self .client @@ -218,8 +218,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, S: TryStream, - S::Ok: IntoBuf, - ::Buf: 'static + Send, + S::Ok: Buf + 'static + Send, S::Error: Into>, { self.client.copy_in(statement, params, stream).await diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 17e82dbe0..ce9cc9d8e 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,12 +1,13 @@ #![warn(rust_2018_idioms)] +use bytes::{Bytes, BytesMut}; use futures::channel::mpsc; use futures::{future, stream, StreamExt}; use futures::{join, try_join, FutureExt, TryStreamExt}; use std::fmt::Write; -use std::time::{Duration, Instant}; +use std::time::Duration; use tokio::net::TcpStream; -use tokio::timer; +use tokio::time; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::types::{Kind, Type}; @@ -302,7 +303,7 @@ async fn cancel_query_raw() { let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let cancel = client.cancel_query_raw(socket, NoTls); - let cancel = timer::delay(Instant::now() + Duration::from_millis(100)).then(|()| cancel); + let cancel = time::delay_for(Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); @@ -410,9 +411,12 @@ async fn copy_in() { let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); let stream = stream::iter( - vec![b"1\tjim\n".to_vec(), b"2\tjoe\n".to_vec()] - .into_iter() - .map(Ok::<_, String>), + vec![ + Bytes::from_static(b"1\tjim\n"), + Bytes::from_static(b"2\tjoe\n"), + ] + .into_iter() + .map(Ok::<_, String>), ); let rows = client.copy_in(&stmt, &[], stream).await.unwrap(); assert_eq!(rows, 2); @@ -446,16 +450,20 @@ async fn copy_in_large() { let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); - let a = "0\tname0\n".to_string(); - let mut b = String::new(); + let a = Bytes::from_static(b"0\tname0\n"); + let mut b = BytesMut::new(); for i in 1..5_000 { writeln!(b, "{0}\tname{0}", i).unwrap(); } - let mut c = String::new(); + let mut c = BytesMut::new(); for i in 5_000..10_000 { writeln!(c, "{0}\tname{0}", i).unwrap(); } - let stream = stream::iter(vec![a, b, c].into_iter().map(Ok::<_, String>)); + let stream = stream::iter( + vec![a, b.freeze(), c.freeze()] + .into_iter() + .map(Ok::<_, String>), + ); let rows = client.copy_in(&stmt, &[], stream).await.unwrap(); assert_eq!(rows, 10_000); @@ -476,7 +484,7 @@ async fn copy_in_error() { .unwrap(); let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); - let stream = stream::iter(vec![Ok(b"1\tjim\n".to_vec()), Err("asdf")]); + let stream = stream::iter(vec![Ok(Bytes::from_static(b"1\tjim\n")), Err("asdf")]); let error = client.copy_in(&stmt, &[], stream).await.unwrap_err(); assert!(error.to_string().contains("asdf")); @@ -509,7 +517,12 @@ async fn copy_out() { .copy_out(&stmt, &[]) .await .unwrap() - .try_concat() + .try_fold(BytesMut::new(), |mut buf, chunk| { + async move { + buf.extend_from_slice(&chunk); + Ok(buf) + } + }) .await .unwrap(); assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index dbfe91928..35219d8ad 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,6 +1,6 @@ use futures::{join, FutureExt}; -use std::time::{Duration, Instant}; -use tokio::timer; +use std::time::Duration; 
+use tokio::time; use tokio_postgres::error::SqlState; use tokio_postgres::{Client, NoTls}; @@ -71,7 +71,7 @@ async fn cancel_query() { let client = connect("host=localhost port=5433 user=postgres").await; let cancel = client.cancel_query(NoTls); - let cancel = timer::delay(Instant::now() + Duration::from_millis(100)).then(|()| cancel); + let cancel = time::delay_for(Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); From 7eb61c786b8d865a45e5d1ee639d1c12f730a6ce Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 26 Nov 2019 16:40:36 -0800 Subject: [PATCH 305/819] Fix up benchmark --- tokio-postgres/benches/bench.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/benches/bench.rs b/tokio-postgres/benches/bench.rs index fececa2b5..315bea8e4 100644 --- a/tokio-postgres/benches/bench.rs +++ b/tokio-postgres/benches/bench.rs @@ -7,7 +7,7 @@ use tokio::runtime::Runtime; use tokio_postgres::{Client, NoTls}; fn setup() -> (Client, Runtime) { - let runtime = Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let (client, conn) = runtime .block_on(tokio_postgres::connect( "host=localhost port=5433 user=postgres", @@ -19,7 +19,7 @@ fn setup() -> (Client, Runtime) { } fn query_prepared(c: &mut Criterion) { - let (client, runtime) = setup(); + let (client, mut runtime) = setup(); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("runtime_block_on", move |b| { b.iter(|| { @@ -29,13 +29,13 @@ fn query_prepared(c: &mut Criterion) { }) }); - let (client, runtime) = setup(); + let (client, mut runtime) = setup(); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("executor_block_on", move |b| { b.iter(|| executor::block_on(client.query(&statement, &[&1i64])).unwrap()) }); - let (client, runtime) = setup(); + let (client, mut runtime) = setup(); let client = Arc::new(client); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("spawned", move |b| { From 4a51a7e00a42a153ab431db6e9b50ea593fae99b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 26 Nov 2019 16:43:10 -0800 Subject: [PATCH 306/819] Fix serde-json --- postgres-types/src/serde_json_1.rs | 32 ++++-------------------------- 1 file changed, 4 insertions(+), 28 deletions(-) diff --git a/postgres-types/src/serde_json_1.rs b/postgres-types/src/serde_json_1.rs index 7b8cfe8fe..e5183d3f5 100644 --- a/postgres-types/src/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -1,3 +1,5 @@ +use crate::{FromSql, IsNull, ToSql, Type}; +use bytes::buf::BufMutExt; use bytes::{BufMut, BytesMut}; use serde_1::{Deserialize, Serialize}; use serde_json_1::Value; @@ -5,32 +7,6 @@ use std::error::Error; use std::fmt::Debug; use std::io::Read; -use crate::{FromSql, IsNull, ToSql, Type}; - -// https://github.com/tokio-rs/bytes/issues/170 -struct B<'a>(&'a mut BytesMut); - -impl<'a> BufMut for B<'a> { - #[inline] - fn remaining_mut(&self) -> usize { - usize::max_value() - self.0.len() - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - self.0.advance_mut(cnt); - } - - #[inline] - unsafe fn bytes_mut(&mut self) -> &mut [u8] { - if !self.0.has_remaining_mut() { - self.0.reserve(64); - } - - self.0.bytes_mut() - } -} - /// A wrapper type to allow arbitrary `Serialize`/`Deserialize` types to convert to Postgres JSON values. 
#[derive(Debug)] pub struct Json(pub T); @@ -66,9 +42,9 @@ where out: &mut BytesMut, ) -> Result> { if *ty == Type::JSONB { - B(out).put_u8(1); + out.put_u8(1); } - serde_json_1::ser::to_writer(B(out).writer(), &self.0)?; + serde_json_1::ser::to_writer(out.writer(), &self.0)?; Ok(IsNull::No) } From 2d083ec1c89fb08010b9712081b6491667641169 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Nov 2019 20:32:13 -0500 Subject: [PATCH 307/819] Release postgres-protocol v0.5.0-alpha.2 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 1a2bd1ec2..400c27936 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.0-alpha.2 - 2019-11-27 + +### Changed + +* Upgraded `bytes` to 0.5. + ## v0.5.0-alpha.1 - 2019-10-14 ### Changed diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index baba826c8..7ad9d4996 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.5.0-alpha.1" +version = "0.5.0-alpha.2" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 480446c4a..ed14afd04 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -23,7 +23,7 @@ with-uuid-0_8 = ["uuid-08"] [dependencies] bytes = "0.5" fallible-iterator = "0.2" -postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } +postgres-protocol = { version = "=0.5.0-alpha.2", path = "../postgres-protocol" } postgres-derive = { version = "=0.4.0-alpha.1", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index bf6d543c0..501d3eb03 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -44,7 +44,7 @@ parking_lot = "0.9" percent-encoding = "2.0" pin-project-lite = "0.1" phf = "0.8" -postgres-protocol = { version = "=0.5.0-alpha.1", path = "../postgres-protocol" } +postgres-protocol = { version = "=0.5.0-alpha.2", path = "../postgres-protocol" } postgres-types = { version = "=0.1.0-alpha.1", path = "../postgres-types" } tokio = { version = "0.2", features = ["io-util"] } tokio-util = { version = "0.2", features = ["codec"] } From de2b78dcefacc9eeafca742c3070ca194960043d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Nov 2019 20:38:32 -0500 Subject: [PATCH 308/819] Release postgres-types v0.1.0-alpha.2 --- postgres-types/CHANGELOG.md | 12 ++++++++++++ postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 postgres-types/CHANGELOG.md diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md new file mode 100644 index 000000000..071a2e65e --- /dev/null +++ b/postgres-types/CHANGELOG.md @@ -0,0 +1,12 @@ +# Change Log + +## v0.1.0-alpha.2 - 2019-11-27 + +### Changed + +* Upgraded `bytes` to 0.5. +* Upgraded `uuid` to 0.8. 
+ +## v0.1.0-alpha.1 - 2019-10-14 + +Initial release diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index ed14afd04..0042b6d25 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 501d3eb03..f1137c8a3 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -45,7 +45,7 @@ percent-encoding = "2.0" pin-project-lite = "0.1" phf = "0.8" postgres-protocol = { version = "=0.5.0-alpha.2", path = "../postgres-protocol" } -postgres-types = { version = "=0.1.0-alpha.1", path = "../postgres-types" } +postgres-types = { version = "=0.1.0-alpha.2", path = "../postgres-types" } tokio = { version = "0.2", features = ["io-util"] } tokio-util = { version = "0.2", features = ["codec"] } From eff68d547faaeda690a99b174b67b4a90be1c471 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Nov 2019 20:42:31 -0500 Subject: [PATCH 309/819] Upgrade parking-lot --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f1137c8a3..381b216d7 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -40,7 +40,7 @@ bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" log = "0.4" -parking_lot = "0.9" +parking_lot = "0.10" percent-encoding = "2.0" pin-project-lite = "0.1" phf = "0.8" From 47d97f0d2ef0b8b24f00e44f62fea8be33178b94 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Nov 2019 20:48:01 -0500 Subject: [PATCH 310/819] Relase tokio-postgres v0.5.0-alpha.2 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/Cargo.toml | 2 +- tokio-postgres-binary-copy/Cargo.toml | 4 ++-- tokio-postgres/CHANGELOG.md | 14 ++++++++++++-- tokio-postgres/Cargo.toml | 2 +- 6 files changed, 18 insertions(+), 8 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index bd71250bb..644a9c522 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -21,7 +21,7 @@ futures-preview = "=0.3.0-alpha.19" native-tls = "0.2" tokio = "0.2" tokio-tls = "0.3" -tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = { version = "0.2", features = ["full"] } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 7d28a4bbf..38ca27586 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -21,7 +21,7 @@ futures = "0.3" openssl = "0.10" tokio = "0.2" tokio-openssl = "0.4" -tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = { version = "0.2", features = ["full"] } diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 9625365bf..8ab2468fb 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -33,7 +33,7 @@ bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" pin-utils = "=0.1.0-alpha.4" -tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = 
"=0.5.0-alpha.2", path = "../tokio-postgres", default-features = false } tokio-executor = "=0.2.0-alpha.6" tokio = { version = "0.2", optional = true, features = ["rt-threaded"] } diff --git a/tokio-postgres-binary-copy/Cargo.toml b/tokio-postgres-binary-copy/Cargo.toml index 2832f538b..0b64e4f2b 100644 --- a/tokio-postgres-binary-copy/Cargo.toml +++ b/tokio-postgres-binary-copy/Cargo.toml @@ -10,8 +10,8 @@ bytes = "0.5" futures = "0.3" parking_lot = "0.10" pin-project-lite = "0.1" -tokio-postgres = { version = "=0.5.0-alpha.1", default-features = false, path = "../tokio-postgres" } +tokio-postgres = { version = "=0.5.0-alpha.2", default-features = false, path = "../tokio-postgres" } [dev-dependencies] tokio = { version = "0.2", features = ["full"] } -tokio-postgres = { version = "=0.5.0-alpha.1", path = "../tokio-postgres" } +tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres" } diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 51a3bf8b3..85f8bbdde 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,9 +1,19 @@ # Change Log -## Unreleased +## v0.5.0-alpha.2 - 2019-11-27 + +### Changed + +* Upgraded `bytes` to 0.5. +* Upgraded `tokio` to 0.2. +* The TLS interface uses a trait to obtain channel binding information rather than returning it after the handshake. +* Changed the value of the `timezone` property from `GMT` to `UTC`. +* Returned `Stream` implementations are now `!Unpin`. ### Added -* Added support for converting to and from `uuid` crate v0.8 + +* Added support for `uuid` 0.8. +* Added the column to `Row::try_get` errors. ## v0.5.0-alpha.1 - 2019-10-14 diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 381b216d7..3568300c3 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.5.0-alpha.1" +version = "0.5.0-alpha.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 223514fcd50f30ef6f0490b1c9d8e6ae5a78258d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Nov 2019 20:58:02 -0500 Subject: [PATCH 311/819] Fix custom spawn interface in sync API --- postgres/Cargo.toml | 2 -- postgres/src/config.rs | 50 ++++++++++++++++++------------------------ 2 files changed, 21 insertions(+), 31 deletions(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 8ab2468fb..96dd31200 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -32,9 +32,7 @@ with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" -pin-utils = "=0.1.0-alpha.4" tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres", default-features = false } -tokio-executor = "=0.2.0-alpha.6" tokio = { version = "0.2", optional = true, features = ["rt-threaded"] } lazy_static = { version = "1.0", optional = true } diff --git a/postgres/src/config.rs b/postgres/src/config.rs index df7bdbcc9..34de68e47 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -2,21 +2,20 @@ //! //! Requires the `runtime` Cargo feature (enabled by default). 
-use futures::{FutureExt, executor}; +use crate::{Client, RUNTIME}; +use futures::{executor, FutureExt}; use log::error; use std::fmt; +use std::future::Future; use std::path::Path; +use std::pin::Pin; use std::str::FromStr; -use std::sync::{mpsc, Arc, Mutex}; +use std::sync::{mpsc, Arc}; use std::time::Duration; -use tokio_executor::Executor; -use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; -use tokio_postgres::{Error, Socket}; - #[doc(inline)] pub use tokio_postgres::config::{ChannelBinding, SslMode, TargetSessionAttrs}; - -use crate::{Client, RUNTIME}; +use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; +use tokio_postgres::{Error, Socket}; /// Connection configuration. /// @@ -94,8 +93,7 @@ use crate::{Client, RUNTIME}; #[derive(Clone)] pub struct Config { config: tokio_postgres::Config, - // this is an option since we don't want to boot up our default runtime unless we're actually going to use it. - executor: Option>>, + spawner: Option + Send>>) + Sync + Send>>, } impl fmt::Debug for Config { @@ -117,7 +115,7 @@ impl Config { pub fn new() -> Config { Config { config: tokio_postgres::Config::new(), - executor: None, + spawner: None, } } @@ -242,14 +240,14 @@ impl Config { self } - /// Sets the executor used to run the connection futures. + /// Sets the spawner used to run the connection futures. /// /// Defaults to a postgres-specific tokio `Runtime`. - pub fn executor(&mut self, executor: E) -> &mut Config + pub fn spawner(&mut self, spawn: F) -> &mut Config where - E: Executor + 'static + Send, + F: Fn(Pin + Send>>) + 'static + Sync + Send, { - self.executor = Some(Arc::new(Mutex::new(executor))); + self.spawner = Some(Arc::new(spawn)); self } @@ -261,22 +259,20 @@ impl Config { T::Stream: Send, >::Future: Send, { - let (client, connection) = match &self.executor { - Some(executor) => { + let (client, connection) = match &self.spawner { + Some(spawn) => { let (tx, rx) = mpsc::channel(); let config = self.config.clone(); let connect = async move { let r = config.connect(tls).await; let _ = tx.send(r); }; - executor.lock().unwrap().spawn(Box::pin(connect)).unwrap(); + spawn(Box::pin(connect)); rx.recv().unwrap()? } None => { let connect = self.config.connect(tls); - RUNTIME.handle().enter(|| { - executor::block_on(connect) - })? + RUNTIME.handle().enter(|| executor::block_on(connect))? 
} }; @@ -285,13 +281,9 @@ impl Config { error!("postgres connection error: {}", e) } }); - match &self.executor { - Some(executor) => { - executor - .lock() - .unwrap() - .spawn(Box::pin(connection)) - .unwrap(); + match &self.spawner { + Some(spawn) => { + spawn(Box::pin(connection)); } None => { RUNTIME.spawn(connection); @@ -314,7 +306,7 @@ impl From for Config { fn from(config: tokio_postgres::Config) -> Config { Config { config, - executor: None, + spawner: None, } } } From fa3b068da72566e5df9b9a33eeab5b158902516e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Nov 2019 21:01:24 -0500 Subject: [PATCH 312/819] Release postgres v0.17.0-alpha.2 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/CHANGELOG.md | 10 ++++++++-- postgres/Cargo.toml | 2 +- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 644a9c522..a7cd29b50 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -25,4 +25,4 @@ tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres", defau [dev-dependencies] tokio = { version = "0.2", features = ["full"] } -postgres = { version = "=0.17.0-alpha.1", path = "../postgres" } +postgres = { version = "=0.17.0-alpha.2", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 38ca27586..2f36ee378 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -25,4 +25,4 @@ tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres", defau [dev-dependencies] tokio = { version = "0.2", features = ["full"] } -postgres = { version = "=0.17.0-alpha.1", path = "../postgres" } +postgres = { version = "=0.17.0-alpha.2", path = "../postgres" } diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 1e25bf5bb..11693ca5e 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,9 +1,15 @@ # Change Log -## Unreleased +## v0.17.0-alpha.2 - 2019-11-27 + +### Changed + +* Changed `Config::executor` to `Config::spawner`. ### Added -* Added support for converting to and from `uuid` crate v0.8 + +* Added support for `uuid` 0.8. +* Added `Transaction::query_one`. ## v0.17.0-alpha.1 - 2019-10-14 diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 96dd31200..cab09cea6 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.17.0-alpha.1" +version = "0.17.0-alpha.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From dc2416011a93883461f93ab29771520f3aa94969 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Nov 2019 21:03:58 -0500 Subject: [PATCH 313/819] Release postgres-native-tls v0.3.0-alpha.2 --- postgres-native-tls/CHANGELOG.md | 10 ++++++++-- postgres-native-tls/Cargo.toml | 4 ++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index 5a8a37ec7..6bd09c065 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -1,13 +1,19 @@ # Change Log +## v0.3.0-alpha.2 - 2019-11-27 + +### Changed + +* Upgraded to `tokio-postgres` v0.5.0-alpha.2. + ## v0.3.0-alpha.1 - 2019-10-14 ### Changed -* Updated to tokio-postgres v0.5.0-alpha.1. +* Updated to `tokio-postgres` v0.5.0-alpha.1. ## v0.2.0-rc.1 - 2019-06-29 ### Changed -* Updated to tokio-postgres v0.4.0-rc. +* Updated to `tokio-postgres` v0.4.0-rc. 
diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index a7cd29b50..12d37c40c 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-native-tls" -version = "0.3.0-alpha.1" +version = "0.3.0-alpha.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -17,7 +17,7 @@ runtime = ["tokio-postgres/runtime"] [dependencies] bytes = "0.5" -futures-preview = "=0.3.0-alpha.19" +futures = "0.3" native-tls = "0.2" tokio = "0.2" tokio-tls = "0.3" From 7c29346cd4493d2e532bd5dfb8d5956b594eb129 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 27 Nov 2019 21:07:51 -0500 Subject: [PATCH 314/819] Release postgres-openssl v0.3.0-alpha.2 --- postgres-openssl/CHANGELOG.md | 8 +++++++- postgres-openssl/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md index 683902404..d1a32fbfb 100644 --- a/postgres-openssl/CHANGELOG.md +++ b/postgres-openssl/CHANGELOG.md @@ -1,10 +1,16 @@ # Change Log +## v0.3.0-alpha.2 - 2019-11-27 + +### Changed + +* Upgraded `tokio-postgres` v0.5.0-alpha.2. + ## v0.3.0-alpha.1 - 2019-10-14 ### Changed -* Updated to `tokio-postgres` v0.5.0-alpha.1 +* Updated to `tokio-postgres` v0.5.0-alpha.1. ## v0.2.0-rc.1 - 2019-03-06 diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 2f36ee378..8165b89db 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-openssl" -version = "0.3.0-alpha.1" +version = "0.3.0-alpha.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From a5428e6a03375aca5b510177f60687d27bc895ff Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 29 Nov 2019 10:15:46 -0500 Subject: [PATCH 315/819] wip sync copy-in --- postgres/src/copy_in_writer.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 postgres/src/copy_in_writer.rs diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs new file mode 100644 index 000000000..e69de29bb From e5e03b0064a8dab054bec9529c9f4024b9fc60cb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Nov 2019 11:04:59 -0500 Subject: [PATCH 316/819] Change the copy_in interface Rather than taking in a Stream and advancing it internally, return a Sink that can be advanced by the calling code. This significantly simplifies encoding logic for things like tokio-postgres-binary-copy. Similarly, the blocking interface returns a Writer. 
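As a rough sketch of the new calling convention (assuming an already-connected `client`, `bytes::Bytes`, and the usual `futures::{pin_mut, SinkExt}` imports; adapted from the tests and docs updated in this patch):

    // async: copy_in now hands back a CopyInSink instead of consuming a stream
    let sink = client.copy_in("COPY foo FROM STDIN", &[]).await?;
    pin_mut!(sink);
    sink.send(Bytes::from_static(b"1\tjim\n")).await?;
    sink.send(Bytes::from_static(b"2\tjoe\n")).await?;
    // the copy must be completed explicitly; dropping the sink aborts it
    let rows = sink.finish().await?;

    // blocking: copy_in hands back a CopyInWriter, which implements Write
    let mut writer = client.copy_in("COPY foo FROM stdin", &[])?;
    writer.write_all(b"1\tjohn\n2\tjane\n")?;
    writer.finish()?;
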
Closes #489 --- postgres/src/client.rs | 32 ++-- postgres/src/copy_in_stream.rs | 24 --- postgres/src/copy_in_writer.rs | 63 ++++++++ postgres/src/copy_out_reader.rs | 33 ++-- postgres/src/lib.rs | 4 +- postgres/src/test.rs | 31 +++- postgres/src/transaction.rs | 19 +-- tokio-postgres-binary-copy/Cargo.toml | 1 - tokio-postgres-binary-copy/src/lib.rs | 126 +++++---------- tokio-postgres-binary-copy/src/test.rs | 61 +++---- tokio-postgres/src/client.rs | 22 ++- tokio-postgres/src/copy_in.rs | 214 +++++++++++++++++-------- tokio-postgres/src/error/mod.rs | 9 -- tokio-postgres/src/lib.rs | 1 + tokio-postgres/src/transaction.rs | 17 +- tokio-postgres/tests/test/main.rs | 45 +++--- 16 files changed, 367 insertions(+), 335 deletions(-) delete mode 100644 postgres/src/copy_in_stream.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index e680f8c5b..4047f1b12 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,19 +1,15 @@ +use crate::iter::Iter; +#[cfg(feature = "runtime")] +use crate::Config; +use crate::{CopyInWriter, CopyOutReader, Statement, ToStatement, Transaction}; use fallible_iterator::FallibleIterator; use futures::executor; -use std::io::{BufRead, Read}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; #[cfg(feature = "runtime")] use tokio_postgres::Socket; use tokio_postgres::{Error, Row, SimpleQueryMessage}; -use crate::copy_in_stream::CopyInStream; -use crate::copy_out_reader::CopyOutReader; -use crate::iter::Iter; -#[cfg(feature = "runtime")] -use crate::Config; -use crate::{Statement, ToStatement, Transaction}; - /// A synchronous PostgreSQL client. /// /// This is a lightweight wrapper over the asynchronous tokio_postgres `Client`. @@ -264,29 +260,33 @@ impl Client { /// The `query` argument can either be a `Statement`, or a raw query string. The data in the provided reader is /// passed along to the server verbatim; it is the caller's responsibility to ensure it uses the proper format. /// + /// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. + /// /// # Examples /// /// ```no_run /// use postgres::{Client, NoTls}; + /// use std::io::Write; /// - /// # fn main() -> Result<(), postgres::Error> { + /// # fn main() -> Result<(), Box> { /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; /// - /// client.copy_in("COPY people FROM stdin", &[], &mut "1\tjohn\n2\tjane\n".as_bytes())?; + /// let mut writer = client.copy_in("COPY people FROM stdin", &[])?; + /// writer.write_all(b"1\tjohn\n2\tjane\n")?; + /// writer.finish()?; /// # Ok(()) /// # } /// ``` - pub fn copy_in( + pub fn copy_in( &mut self, query: &T, params: &[&(dyn ToSql + Sync)], - reader: R, - ) -> Result + ) -> Result, Error> where T: ?Sized + ToStatement, - R: Read + Unpin, { - executor::block_on(self.0.copy_in(query, params, CopyInStream(reader))) + let sink = executor::block_on(self.0.copy_in(query, params))?; + Ok(CopyInWriter::new(sink)) } /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data. 
@@ -312,7 +312,7 @@ impl Client { &mut self, query: &T, params: &[&(dyn ToSql + Sync)], - ) -> Result + ) -> Result, Error> where T: ?Sized + ToStatement, { diff --git a/postgres/src/copy_in_stream.rs b/postgres/src/copy_in_stream.rs deleted file mode 100644 index 6bda3e5dd..000000000 --- a/postgres/src/copy_in_stream.rs +++ /dev/null @@ -1,24 +0,0 @@ -use futures::Stream; -use std::io::{self, Cursor, Read}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pub struct CopyInStream(pub R); - -impl Stream for CopyInStream -where - R: Read + Unpin, -{ - type Item = io::Result>>; - - fn poll_next( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll>>>> { - let mut buf = vec![]; - match self.0.by_ref().take(4096).read_to_end(&mut buf)? { - 0 => Poll::Ready(None), - _ => Poll::Ready(Some(Ok(Cursor::new(buf)))), - } - } -} diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs index e69de29bb..b7a2a009e 100644 --- a/postgres/src/copy_in_writer.rs +++ b/postgres/src/copy_in_writer.rs @@ -0,0 +1,63 @@ +use bytes::{Bytes, BytesMut}; +use futures::{executor, SinkExt}; +use std::io; +use std::io::Write; +use std::marker::PhantomData; +use std::pin::Pin; +use tokio_postgres::{CopyInSink, Error}; + +/// The writer returned by the `copy_in` method. +/// +/// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. +pub struct CopyInWriter<'a> { + sink: Pin>>, + buf: BytesMut, + _p: PhantomData<&'a mut ()>, +} + +// no-op impl to extend borrow until drop +impl Drop for CopyInWriter<'_> { + fn drop(&mut self) {} +} + +impl<'a> CopyInWriter<'a> { + pub(crate) fn new(sink: CopyInSink) -> CopyInWriter<'a> { + CopyInWriter { + sink: Box::pin(sink), + buf: BytesMut::new(), + _p: PhantomData, + } + } + + /// Completes the copy, returning the number of rows written. + /// + /// If this is not called, the copy will be aborted. + pub fn finish(mut self) -> Result { + self.flush_inner()?; + executor::block_on(self.sink.as_mut().finish()) + } + + fn flush_inner(&mut self) -> Result<(), Error> { + if self.buf.is_empty() { + return Ok(()); + } + + executor::block_on(self.sink.as_mut().send(self.buf.split().freeze())) + } +} + +impl Write for CopyInWriter<'_> { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.buf.len() > 4096 { + self.flush()?; + } + + self.buf.extend_from_slice(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + self.flush_inner() + .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + } +} diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index 680d4d318..f1309fb8a 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -1,33 +1,24 @@ use bytes::{Buf, Bytes}; -use futures::{executor, Stream}; +use futures::executor; use std::io::{self, BufRead, Cursor, Read}; use std::marker::PhantomData; use std::pin::Pin; -use tokio_postgres::Error; +use tokio_postgres::{CopyStream, Error}; /// The reader returned by the `copy_out` method. 
-pub struct CopyOutReader<'a, S> -where - S: Stream, -{ - it: executor::BlockingStream>>, +pub struct CopyOutReader<'a> { + it: executor::BlockingStream>>, cur: Cursor, _p: PhantomData<&'a mut ()>, } // no-op impl to extend borrow until drop -impl<'a, S> Drop for CopyOutReader<'a, S> -where - S: Stream, -{ +impl Drop for CopyOutReader<'_> { fn drop(&mut self) {} } -impl<'a, S> CopyOutReader<'a, S> -where - S: Stream>, -{ - pub(crate) fn new(stream: S) -> Result, Error> { +impl<'a> CopyOutReader<'a> { + pub(crate) fn new(stream: CopyStream) -> Result, Error> { let mut it = executor::block_on_stream(Box::pin(stream)); let cur = match it.next() { Some(Ok(cur)) => cur, @@ -43,10 +34,7 @@ where } } -impl<'a, S> Read for CopyOutReader<'a, S> -where - S: Stream>, -{ +impl Read for CopyOutReader<'_> { fn read(&mut self, buf: &mut [u8]) -> io::Result { let b = self.fill_buf()?; let len = usize::min(buf.len(), b.len()); @@ -56,10 +44,7 @@ where } } -impl<'a, S> BufRead for CopyOutReader<'a, S> -where - S: Stream>, -{ +impl BufRead for CopyOutReader<'_> { fn fill_buf(&mut self) -> io::Result<&[u8]> { if self.cur.remaining() == 0 { match self.it.next() { diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 1cf9e3248..d63e10a7b 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -69,6 +69,8 @@ pub use tokio_postgres::{ pub use crate::client::*; #[cfg(feature = "runtime")] pub use crate::config::Config; +pub use crate::copy_in_writer::CopyInWriter; +pub use crate::copy_out_reader::CopyOutReader; #[doc(no_inline)] pub use crate::error::Error; #[doc(no_inline)] @@ -80,7 +82,7 @@ pub use crate::transaction::*; mod client; #[cfg(feature = "runtime")] pub mod config; -mod copy_in_stream; +mod copy_in_writer; mod copy_out_reader; mod iter; mod transaction; diff --git a/postgres/src/test.rs b/postgres/src/test.rs index f7d84a88d..d376d186f 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,4 +1,4 @@ -use std::io::Read; +use std::io::{Read, Write}; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -154,13 +154,9 @@ fn copy_in() { .simple_query("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") .unwrap(); - client - .copy_in( - "COPY foo FROM stdin", - &[], - &mut &b"1\tsteven\n2\ttimothy"[..], - ) - .unwrap(); + let mut writer = client.copy_in("COPY foo FROM stdin", &[]).unwrap(); + writer.write_all(b"1\tsteven\n2\ttimothy").unwrap(); + writer.finish().unwrap(); let rows = client .query("SELECT id, name FROM foo ORDER BY id", &[]) @@ -173,6 +169,25 @@ fn copy_in() { assert_eq!(rows[1].get::<_, &str>(1), "timothy"); } +#[test] +fn copy_in_abort() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .simple_query("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") + .unwrap(); + + let mut writer = client.copy_in("COPY foo FROM stdin", &[]).unwrap(); + writer.write_all(b"1\tsteven\n2\ttimothy").unwrap(); + drop(writer); + + let rows = client + .query("SELECT id, name FROM foo ORDER BY id", &[]) + .unwrap(); + + assert_eq!(rows.len(), 0); +} + #[test] fn copy_out() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 8b857bb06..17631c79a 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,14 +1,10 @@ +use crate::iter::Iter; +use crate::{CopyInWriter, CopyOutReader, Portal, Statement, ToStatement}; use fallible_iterator::FallibleIterator; use futures::executor; -use 
std::io::{BufRead, Read}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; -use crate::copy_in_stream::CopyInStream; -use crate::copy_out_reader::CopyOutReader; -use crate::iter::Iter; -use crate::{Portal, Statement, ToStatement}; - /// A representation of a PostgreSQL database transaction. /// /// Transactions will implicitly roll back by default when dropped. Use the `commit` method to commit the changes made @@ -117,17 +113,16 @@ impl<'a> Transaction<'a> { } /// Like `Client::copy_in`. - pub fn copy_in( + pub fn copy_in( &mut self, query: &T, params: &[&(dyn ToSql + Sync)], - reader: R, - ) -> Result + ) -> Result, Error> where T: ?Sized + ToStatement, - R: Read + Unpin, { - executor::block_on(self.0.copy_in(query, params, CopyInStream(reader))) + let sink = executor::block_on(self.0.copy_in(query, params))?; + Ok(CopyInWriter::new(sink)) } /// Like `Client::copy_out`. @@ -135,7 +130,7 @@ impl<'a> Transaction<'a> { &mut self, query: &T, params: &[&(dyn ToSql + Sync)], - ) -> Result + ) -> Result, Error> where T: ?Sized + ToStatement, { diff --git a/tokio-postgres-binary-copy/Cargo.toml b/tokio-postgres-binary-copy/Cargo.toml index 0b64e4f2b..a8d44bf55 100644 --- a/tokio-postgres-binary-copy/Cargo.toml +++ b/tokio-postgres-binary-copy/Cargo.toml @@ -8,7 +8,6 @@ edition = "2018" byteorder = "1.0" bytes = "0.5" futures = "0.3" -parking_lot = "0.10" pin-project-lite = "0.1" tokio-postgres = { version = "=0.5.0-alpha.2", default-features = false, path = "../tokio-postgres" } diff --git a/tokio-postgres-binary-copy/src/lib.rs b/tokio-postgres-binary-copy/src/lib.rs index 59acf7460..b90768b2e 100644 --- a/tokio-postgres-binary-copy/src/lib.rs +++ b/tokio-postgres-binary-copy/src/lib.rs @@ -1,145 +1,95 @@ use bytes::{BufMut, Bytes, BytesMut, Buf}; -use futures::{future, ready, Stream}; -use parking_lot::Mutex; +use futures::{ready, Stream, SinkExt}; use pin_project_lite::pin_project; use std::convert::TryFrom; use std::error::Error; -use std::future::Future; use std::ops::Range; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use tokio_postgres::types::{IsNull, ToSql, Type, FromSql, WrongType}; -use tokio_postgres::CopyStream; +use tokio_postgres::{CopyStream, CopyInSink}; use std::io::Cursor; use byteorder::{ByteOrder, BigEndian}; #[cfg(test)] mod test; -const BLOCK_SIZE: usize = 4096; const MAGIC: &[u8] = b"PGCOPY\n\xff\r\n\0"; const HEADER_LEN: usize = MAGIC.len() + 4 + 4; pin_project! 
{ - pub struct BinaryCopyInStream { + pub struct BinaryCopyInWriter { #[pin] - future: F, - buf: Arc>, - done: bool, + sink: CopyInSink, + types: Vec, + buf: BytesMut, } } -impl BinaryCopyInStream -where - F: Future>>, -{ - pub fn new(types: &[Type], write_values: M) -> BinaryCopyInStream - where - M: FnOnce(BinaryCopyInWriter) -> F, - { +impl BinaryCopyInWriter { + pub fn new(sink: CopyInSink, types: &[Type]) -> BinaryCopyInWriter { let mut buf = BytesMut::new(); buf.reserve(HEADER_LEN); buf.put_slice(MAGIC); // magic buf.put_i32(0); // flags buf.put_i32(0); // header extension - let buf = Arc::new(Mutex::new(buf)); - let writer = BinaryCopyInWriter { - buf: buf.clone(), + BinaryCopyInWriter { + sink, types: types.to_vec(), - }; - - BinaryCopyInStream { - future: write_values(writer), buf, - done: false, } } -} - -impl Stream for BinaryCopyInStream -where - F: Future>>, -{ - type Item = Result>; - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - if *this.done { - return Poll::Ready(None); - } - - *this.done = this.future.poll(cx)?.is_ready(); - - let mut buf = this.buf.lock(); - if *this.done { - buf.reserve(2); - buf.put_i16(-1); - Poll::Ready(Some(Ok(buf.split().freeze()))) - } else if buf.len() > BLOCK_SIZE { - Poll::Ready(Some(Ok(buf.split().freeze()))) - } else { - Poll::Pending - } - } -} - -// FIXME this should really just take a reference to the buffer, but that requires HKT :( -pub struct BinaryCopyInWriter { - buf: Arc>, - types: Vec, -} - -impl BinaryCopyInWriter { pub async fn write( - &mut self, + self: Pin<&mut Self>, values: &[&(dyn ToSql + Send)], ) -> Result<(), Box> { self.write_raw(values.iter().cloned()).await } - pub async fn write_raw<'a, I>(&mut self, values: I) -> Result<(), Box> - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, + pub async fn write_raw<'a, I>(self: Pin<&mut Self>, values: I) -> Result<(), Box> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, { + let mut this = self.project(); + let values = values.into_iter(); assert!( - values.len() == self.types.len(), + values.len() == this.types.len(), "expected {} values but got {}", - self.types.len(), + this.types.len(), values.len(), ); - future::poll_fn(|_| { - if self.buf.lock().len() > BLOCK_SIZE { - Poll::Pending - } else { - Poll::Ready(()) - } - }) - .await; - - let mut buf = self.buf.lock(); - - buf.reserve(2); - buf.put_u16(self.types.len() as u16); + this.buf.put_i16(this.types.len() as i16); - for (value, type_) in values.zip(&self.types) { - let idx = buf.len(); - buf.reserve(4); - buf.put_i32(0); - let len = match value.to_sql_checked(type_, &mut buf)? { + for (value, type_) in values.zip(this.types) { + let idx = this.buf.len(); + this.buf.put_i32(0); + let len = match value.to_sql_checked(type_, this.buf)? 
{ IsNull::Yes => -1, - IsNull::No => i32::try_from(buf.len() - idx - 4)?, + IsNull::No => i32::try_from(this.buf.len() - idx - 4)?, }; - BigEndian::write_i32(&mut buf[idx..], len); + BigEndian::write_i32(&mut this.buf[idx..], len); + } + + if this.buf.len() > 4096 { + this.sink.send(this.buf.split().freeze()).await?; } Ok(()) } + + pub async fn finish(self: Pin<&mut Self>) -> Result { + let mut this = self.project(); + + this.buf.put_i16(-1); + this.sink.send(this.buf.split().freeze()).await?; + this.sink.finish().await + } } struct Header { diff --git a/tokio-postgres-binary-copy/src/test.rs b/tokio-postgres-binary-copy/src/test.rs index f4d193518..7d8bcd30c 100644 --- a/tokio-postgres-binary-copy/src/test.rs +++ b/tokio-postgres-binary-copy/src/test.rs @@ -1,7 +1,7 @@ -use crate::{BinaryCopyInStream, BinaryCopyOutStream}; +use crate::{BinaryCopyInWriter, BinaryCopyOutStream}; use tokio_postgres::types::Type; use tokio_postgres::{Client, NoTls}; -use futures::TryStreamExt; +use futures::{TryStreamExt, pin_mut}; async fn connect() -> Client { let (client, connection) = @@ -23,19 +23,12 @@ async fn write_basic() { .await .unwrap(); - let stream = BinaryCopyInStream::new(&[Type::INT4, Type::TEXT], |mut w| { - async move { - w.write(&[&1i32, &"foobar"]).await?; - w.write(&[&2i32, &None::<&str>]).await?; - - Ok(()) - } - }); - - client - .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream) - .await - .unwrap(); + let sink = client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]).await.unwrap(); + let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]); + pin_mut!(writer); + writer.as_mut().write(&[&1i32, &"foobar"]).await.unwrap(); + writer.as_mut().write(&[&2i32, &None::<&str>]).await.unwrap(); + writer.finish().await.unwrap(); let rows = client .query("SELECT id, bar FROM foo ORDER BY id", &[]) @@ -57,20 +50,15 @@ async fn write_many_rows() { .await .unwrap(); - let stream = BinaryCopyInStream::new(&[Type::INT4, Type::TEXT], |mut w| { - async move { - for i in 0..10_000i32 { - w.write(&[&i, &format!("the value for {}", i)]).await?; - } + let sink = client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]).await.unwrap(); + let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]); + pin_mut!(writer); - Ok(()) - } - }); + for i in 0..10_000i32 { + writer.as_mut().write(&[&i, &format!("the value for {}", i)]).await.unwrap(); + } - client - .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream) - .await - .unwrap(); + writer.finish().await.unwrap(); let rows = client .query("SELECT id, bar FROM foo ORDER BY id", &[]) @@ -91,20 +79,15 @@ async fn write_big_rows() { .await .unwrap(); - let stream = BinaryCopyInStream::new(&[Type::INT4, Type::BYTEA], |mut w| { - async move { - for i in 0..2i32 { - w.write(&[&i, &vec![i as u8; 128 * 1024]]).await?; - } + let sink = client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]).await.unwrap(); + let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::BYTEA]); + pin_mut!(writer); - Ok(()) - } - }); + for i in 0..2i32 { + writer.as_mut().write(&[&i, &vec![i as u8; 128 * 1024]]).await.unwrap(); + } - client - .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[], stream) - .await - .unwrap(); + writer.finish().await.unwrap(); let rows = client .query("SELECT id, bar FROM foo ORDER BY id", &[]) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 4d7c20536..984d401bd 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -14,18 +14,17 @@ use 
crate::to_statement::ToStatement; use crate::types::{Oid, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; -use crate::{cancel_query_raw, copy_in, copy_out, query, Transaction}; +use crate::{cancel_query_raw, copy_in, copy_out, query, CopyInSink, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; -use futures::{future, pin_mut, ready, StreamExt, TryStream, TryStreamExt}; +use futures::{future, pin_mut, ready, StreamExt, TryStreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; use std::collections::HashMap; -use std::error; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -340,29 +339,26 @@ impl Client { query::execute(self.inner(), statement, params).await } - /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. + /// Executes a `COPY FROM STDIN` statement, returning a sink used to write the copy data. /// - /// The data in the provided stream is passed along to the server verbatim; it is the caller's responsibility to - /// ensure it uses the proper format. + /// The copy *must* be explicitly completed via the `Sink::close` or `finish` methods. If it is + /// not, the copy will be aborted. /// /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. - pub async fn copy_in( + pub async fn copy_in( &self, statement: &T, params: &[&(dyn ToSql + Sync)], - stream: S, - ) -> Result + ) -> Result, Error> where T: ?Sized + ToStatement, - S: TryStream, - S::Ok: Buf + 'static + Send, - S::Error: Into>, + U: Buf + 'static + Send, { let statement = statement.__convert().into_statement(self).await?; let params = slice_iter(params); - copy_in::copy_in(self.inner(), statement, params, stream).await + copy_in::copy_in(self.inner(), statement, params).await } /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index ebacb6cf1..b1cdae598 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -1,4 +1,4 @@ -use crate::client::InnerClient; +use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::ToSql; @@ -6,11 +6,13 @@ use crate::{query, Error, Statement}; use bytes::buf::BufExt; use bytes::{Buf, BufMut, BytesMut}; use futures::channel::mpsc; -use futures::{pin_mut, ready, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; +use futures::{ready, Sink, SinkExt, Stream, StreamExt}; +use futures::future; +use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use postgres_protocol::message::frontend::CopyData; -use std::error; +use std::marker::{PhantomPinned, PhantomData}; use std::pin::Pin; use std::task::{Context, Poll}; @@ -61,18 +63,148 @@ impl Stream for CopyInReceiver { } } -pub async fn copy_in<'a, I, S>( +enum SinkState { + Active, + Closing, + Reading, +} + +pin_project! { + /// A sink for `COPY ... FROM STDIN` query data. + /// + /// The copy *must* be explicitly completed via the `Sink::close` or `finish` methods. If it is + /// not, the copy will be aborted. 
+ pub struct CopyInSink { + #[pin] + sender: mpsc::Sender, + responses: Responses, + buf: BytesMut, + state: SinkState, + #[pin] + _p: PhantomPinned, + _p2: PhantomData, + } +} + +impl CopyInSink +where + T: Buf + 'static + Send, +{ + /// A poll-based version of `finish`. + pub fn poll_finish(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match self.state { + SinkState::Active => { + ready!(self.as_mut().poll_flush(cx))?; + let mut this = self.as_mut().project(); + ready!(this.sender.as_mut().poll_ready(cx)).map_err(|_| Error::closed())?; + this.sender + .start_send(CopyInMessage::Done) + .map_err(|_| Error::closed())?; + *this.state = SinkState::Closing; + } + SinkState::Closing => { + let this = self.as_mut().project(); + ready!(this.sender.poll_close(cx)).map_err(|_| Error::closed())?; + *this.state = SinkState::Reading; + } + SinkState::Reading => { + let this = self.as_mut().project(); + match ready!(this.responses.poll_next(cx))? { + Message::CommandComplete(body) => { + let rows = body + .tag() + .map_err(Error::parse)? + .rsplit(' ') + .next() + .unwrap() + .parse() + .unwrap_or(0); + return Poll::Ready(Ok(rows)); + } + _ => return Poll::Ready(Err(Error::unexpected_message())), + } + } + } + } + } + + /// Completes the copy, returning the number of rows inserted. + /// + /// The `Sink::close` method is equivalent to `finish`, except that it does not return the + /// number of rows. + pub async fn finish(mut self: Pin<&mut Self>) -> Result { + future::poll_fn(|cx| self.as_mut().poll_finish(cx)).await + } +} + +impl Sink for CopyInSink +where + T: Buf + 'static + Send, +{ + type Error = Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project() + .sender + .poll_ready(cx) + .map_err(|_| Error::closed()) + } + + fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Error> { + let this = self.project(); + + let data: Box = if item.remaining() > 4096 { + if this.buf.is_empty() { + Box::new(item) + } else { + Box::new(this.buf.split().freeze().chain(item)) + } + } else { + this.buf.put(item); + if this.buf.len() > 4096 { + Box::new(this.buf.split().freeze()) + } else { + return Ok(()); + } + }; + + let data = CopyData::new(data).map_err(Error::encode)?; + this.sender + .start_send(CopyInMessage::Message(FrontendMessage::CopyData(data))) + .map_err(|_| Error::closed()) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.project(); + + if !this.buf.is_empty() { + ready!(this.sender.as_mut().poll_ready(cx)).map_err(|_| Error::closed())?; + let data: Box = Box::new(this.buf.split().freeze()); + let data = CopyData::new(data).map_err(Error::encode)?; + this.sender + .as_mut() + .start_send(CopyInMessage::Message(FrontendMessage::CopyData(data))) + .map_err(|_| Error::closed())?; + } + + this.sender.poll_flush(cx).map_err(|_| Error::closed()) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.poll_finish(cx).map_ok(|_| ()) + } +} + +pub async fn copy_in<'a, I, T>( client: &InnerClient, statement: Statement, params: I, - stream: S, -) -> Result +) -> Result, Error> where I: IntoIterator, I::IntoIter: ExactSizeIterator, - S: TryStream, - S::Ok: Buf + 'static + Send, - S::Error: Into>, + T: Buf + 'static + Send, { let buf = query::encode(client, &statement, params)?; @@ -95,60 +227,12 @@ where _ => return Err(Error::unexpected_message()), } - let mut bytes = BytesMut::new(); - let stream = stream.into_stream(); - pin_mut!(stream); - - while let 
Some(buf) = stream.try_next().await.map_err(Error::copy_in_stream)? { - let data: Box = if buf.remaining() > 4096 { - if bytes.is_empty() { - Box::new(buf) - } else { - Box::new(bytes.split().freeze().chain(buf)) - } - } else { - bytes.reserve(buf.remaining()); - bytes.put(buf); - if bytes.len() > 4096 { - Box::new(bytes.split().freeze()) - } else { - continue; - } - }; - - let data = CopyData::new(data).map_err(Error::encode)?; - sender - .send(CopyInMessage::Message(FrontendMessage::CopyData(data))) - .await - .map_err(|_| Error::closed())?; - } - - if !bytes.is_empty() { - let data: Box = Box::new(bytes.freeze()); - let data = CopyData::new(data).map_err(Error::encode)?; - sender - .send(CopyInMessage::Message(FrontendMessage::CopyData(data))) - .await - .map_err(|_| Error::closed())?; - } - - sender - .send(CopyInMessage::Done) - .await - .map_err(|_| Error::closed())?; - - match responses.next().await? { - Message::CommandComplete(body) => { - let rows = body - .tag() - .map_err(Error::parse)? - .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); - Ok(rows) - } - _ => Err(Error::unexpected_message()), - } + Ok(CopyInSink { + sender, + responses, + buf: BytesMut::new(), + state: SinkState::Active, + _p: PhantomPinned, + _p2: PhantomData, + }) } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 4dde62f7f..788e70cf4 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -337,7 +337,6 @@ enum Kind { ToSql(usize), FromSql(usize), Column(String), - CopyInStream, Closed, Db, Parse, @@ -376,7 +375,6 @@ impl fmt::Display for Error { Kind::ToSql(idx) => write!(fmt, "error serializing parameter {}", idx)?, Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, Kind::Column(column) => write!(fmt, "invalid column `{}`", column)?, - Kind::CopyInStream => fmt.write_str("error from a copy_in stream")?, Kind::Closed => fmt.write_str("connection closed")?, Kind::Db => fmt.write_str("db error")?, Kind::Parse => fmt.write_str("error parsing response from server")?, @@ -458,13 +456,6 @@ impl Error { Error::new(Kind::Column(column), None) } - pub(crate) fn copy_in_stream(e: E) -> Error - where - E: Into>, - { - Error::new(Kind::CopyInStream, Some(e.into())) - } - pub(crate) fn tls(e: Box) -> Error { Error::new(Kind::Tls, Some(e)) } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index dc88389fb..61367f290 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -105,6 +105,7 @@ pub use crate::client::Client; pub use crate::config::Config; pub use crate::connection::Connection; +pub use crate::copy_in::CopyInSink; pub use crate::copy_out::CopyStream; use crate::error::DbError; pub use crate::error::Error; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 81c5d4602..ac44a841b 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -9,12 +9,12 @@ use crate::types::{ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; use crate::{ - bind, query, slice_iter, Client, Error, Portal, Row, SimpleQueryMessage, Statement, ToStatement, + bind, query, slice_iter, Client, CopyInSink, Error, Portal, Row, SimpleQueryMessage, Statement, + ToStatement, }; use bytes::Buf; -use futures::{TryStream, TryStreamExt}; +use futures::{TryStreamExt}; use postgres_protocol::message::frontend; -use std::error; use tokio::io::{AsyncRead, AsyncWrite}; /// A representation of a PostgreSQL database transaction. 
@@ -209,19 +209,16 @@ impl<'a> Transaction<'a> { } /// Like `Client::copy_in`. - pub async fn copy_in( + pub async fn copy_in( &self, statement: &T, params: &[&(dyn ToSql + Sync)], - stream: S, - ) -> Result + ) -> Result, Error> where T: ?Sized + ToStatement, - S: TryStream, - S::Ok: Buf + 'static + Send, - S::Error: Into>, + U: Buf + 'static + Send, { - self.client.copy_in(statement, params, stream).await + self.client.copy_in(statement, params).await } /// Like `Client::copy_out`. diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index ce9cc9d8e..4e6086f48 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -2,8 +2,7 @@ use bytes::{Bytes, BytesMut}; use futures::channel::mpsc; -use futures::{future, stream, StreamExt}; -use futures::{join, try_join, FutureExt, TryStreamExt}; +use futures::{future, stream, StreamExt, SinkExt, pin_mut, join, try_join, FutureExt, TryStreamExt}; use std::fmt::Write; use std::time::Duration; use tokio::net::TcpStream; @@ -409,23 +408,21 @@ async fn copy_in() { .await .unwrap(); - let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); - let stream = stream::iter( + let mut stream = stream::iter( vec![ Bytes::from_static(b"1\tjim\n"), Bytes::from_static(b"2\tjoe\n"), ] .into_iter() - .map(Ok::<_, String>), + .map(Ok::<_, Error>), ); - let rows = client.copy_in(&stmt, &[], stream).await.unwrap(); + let sink = client.copy_in("COPY foo FROM STDIN", &[]).await.unwrap(); + pin_mut!(sink); + sink.send_all(&mut stream).await.unwrap(); + let rows = sink.finish().await.unwrap(); assert_eq!(rows, 2); - let stmt = client - .prepare("SELECT id, name FROM foo ORDER BY id") - .await - .unwrap(); - let rows = client.query(&stmt, &[]).await.unwrap(); + let rows = client.query("SELECT id, name FROM foo ORDER BY id", &[]).await.unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -448,8 +445,6 @@ async fn copy_in_large() { .await .unwrap(); - let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); - let a = Bytes::from_static(b"0\tname0\n"); let mut b = BytesMut::new(); for i in 1..5_000 { @@ -459,13 +454,16 @@ async fn copy_in_large() { for i in 5_000..10_000 { writeln!(c, "{0}\tname{0}", i).unwrap(); } - let stream = stream::iter( + let mut stream = stream::iter( vec![a, b.freeze(), c.freeze()] .into_iter() - .map(Ok::<_, String>), + .map(Ok::<_, Error>), ); - let rows = client.copy_in(&stmt, &[], stream).await.unwrap(); + let sink = client.copy_in("COPY foo FROM STDIN", &[]).await.unwrap(); + pin_mut!(sink); + sink.send_all(&mut stream).await.unwrap(); + let rows = sink.finish().await.unwrap(); assert_eq!(rows, 10_000); } @@ -483,16 +481,13 @@ async fn copy_in_error() { .await .unwrap(); - let stmt = client.prepare("COPY foo FROM STDIN").await.unwrap(); - let stream = stream::iter(vec![Ok(Bytes::from_static(b"1\tjim\n")), Err("asdf")]); - let error = client.copy_in(&stmt, &[], stream).await.unwrap_err(); - assert!(error.to_string().contains("asdf")); + { + let sink = client.copy_in("COPY foo FROM STDIN", &[]).await.unwrap(); + pin_mut!(sink); + sink.send(Bytes::from_static(b"1\tsteven")).await.unwrap(); + } - let stmt = client - .prepare("SELECT id, name FROM foo ORDER BY id") - .await - .unwrap(); - let rows = client.query(&stmt, &[]).await.unwrap(); + let rows = client.query("SELECT id, name FROM foo ORDER BY id", &[]).await.unwrap(); assert_eq!(rows.len(), 0); } From ef0b79fbcb52ba40c4ca5269875f8723f97d7dc5 Mon Sep 17 00:00:00 2001 From: Steven Fackler 
Date: Sat, 30 Nov 2019 11:45:33 -0500 Subject: [PATCH 317/819] docs fix --- tokio-postgres/src/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 984d401bd..bfc2579ff 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -383,8 +383,8 @@ impl Client { /// /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that /// point. The simple query protocol returns the values in rows as strings rather than in their binary encodings, - /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a stream over the - /// rows, this method returns a stream over an enum which indicates either the completion of one of the commands, + /// so the associated row type doesn't work with the `FromSql` trait. Rather than simply returning a list of the + /// rows, this method returns a list of an enum which indicates either the completion of one of the commands, /// or a row of data. This preserves the framing between the separate statements in the request. /// /// # Warning From 1390cc57d77ed54e44efdde5aeeb60c4f7c9d524 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Nov 2019 12:01:09 -0500 Subject: [PATCH 318/819] doc fix --- postgres/src/transaction.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 17631c79a..0e361e366 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -8,7 +8,7 @@ use tokio_postgres::{Error, Row, SimpleQueryMessage}; /// A representation of a PostgreSQL database transaction. /// /// Transactions will implicitly roll back by default when dropped. Use the `commit` method to commit the changes made -/// in the transaction. Transactions can be nested, with inner transactions implemented via safepoints. +/// in the transaction. Transactions can be nested, with inner transactions implemented via savepoints. 
pub struct Transaction<'a>(tokio_postgres::Transaction<'a>); impl<'a> Transaction<'a> { From ff3ea1c9df45baa444467142666c767cf5647aaf Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Nov 2019 12:13:38 -0500 Subject: [PATCH 319/819] Expose RowIter --- postgres/src/client.rs | 12 +++------- postgres/src/iter.rs | 45 ------------------------------------- postgres/src/lib.rs | 3 ++- postgres/src/row_iter.rs | 34 ++++++++++++++++++++++++++++ postgres/src/transaction.rs | 16 +++++-------- 5 files changed, 44 insertions(+), 66 deletions(-) delete mode 100644 postgres/src/iter.rs create mode 100644 postgres/src/row_iter.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 4047f1b12..fad087fbf 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,8 +1,6 @@ -use crate::iter::Iter; #[cfg(feature = "runtime")] use crate::Config; -use crate::{CopyInWriter, CopyOutReader, Statement, ToStatement, Transaction}; -use fallible_iterator::FallibleIterator; +use crate::{CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction}; use futures::executor; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; @@ -183,18 +181,14 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn query_raw<'a, T, I>( - &mut self, - query: &T, - params: I, - ) -> Result, Error> + pub fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, I: IntoIterator, I::IntoIter: ExactSizeIterator, { let stream = executor::block_on(self.0.query_raw(query, params))?; - Ok(Iter::new(stream)) + Ok(RowIter::new(stream)) } /// Creates a new prepared statement. diff --git a/postgres/src/iter.rs b/postgres/src/iter.rs deleted file mode 100644 index 1f3ffc962..000000000 --- a/postgres/src/iter.rs +++ /dev/null @@ -1,45 +0,0 @@ -use fallible_iterator::FallibleIterator; -use futures::executor::{self, BlockingStream}; -use futures::Stream; -use std::marker::PhantomData; -use std::pin::Pin; - -pub struct Iter<'a, S> -where - S: Stream, -{ - it: BlockingStream>>, - _p: PhantomData<&'a mut ()>, -} - -// no-op impl to extend the borrow until drop -impl<'a, S> Drop for Iter<'a, S> -where - S: Stream, -{ - fn drop(&mut self) {} -} - -impl<'a, S> Iter<'a, S> -where - S: Stream, -{ - pub fn new(stream: S) -> Iter<'a, S> { - Iter { - it: executor::block_on_stream(Box::pin(stream)), - _p: PhantomData, - } - } -} - -impl<'a, S, T, E> FallibleIterator for Iter<'a, S> -where - S: Stream>, -{ - type Item = T; - type Error = E; - - fn next(&mut self) -> Result, E> { - self.it.next().transpose() - } -} diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index d63e10a7b..13c9a21d8 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -75,6 +75,7 @@ pub use crate::copy_out_reader::CopyOutReader; pub use crate::error::Error; #[doc(no_inline)] pub use crate::row::{Row, SimpleQueryRow}; +pub use crate::row_iter::RowIter; #[doc(no_inline)] pub use crate::tls::NoTls; pub use crate::transaction::*; @@ -84,7 +85,7 @@ mod client; pub mod config; mod copy_in_writer; mod copy_out_reader; -mod iter; +mod row_iter; mod transaction; #[cfg(feature = "runtime")] diff --git a/postgres/src/row_iter.rs b/postgres/src/row_iter.rs new file mode 100644 index 000000000..08da68739 --- /dev/null +++ b/postgres/src/row_iter.rs @@ -0,0 +1,34 @@ +use fallible_iterator::FallibleIterator; +use futures::executor::{self, BlockingStream}; +use std::marker::PhantomData; +use std::pin::Pin; +use tokio_postgres::{Error, Row, RowStream}; + +/// 
The iterator returned by `query_raw`. +pub struct RowIter<'a> { + it: BlockingStream>>, + _p: PhantomData<&'a mut ()>, +} + +// no-op impl to extend the borrow until drop +impl Drop for RowIter<'_> { + fn drop(&mut self) {} +} + +impl<'a> RowIter<'a> { + pub(crate) fn new(stream: RowStream) -> RowIter<'a> { + RowIter { + it: executor::block_on_stream(Box::pin(stream)), + _p: PhantomData, + } + } +} + +impl FallibleIterator for RowIter<'_> { + type Item = Row; + type Error = Error; + + fn next(&mut self) -> Result, Error> { + self.it.next().transpose() + } +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 0e361e366..b42ba64ff 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,6 +1,4 @@ -use crate::iter::Iter; -use crate::{CopyInWriter, CopyOutReader, Portal, Statement, ToStatement}; -use fallible_iterator::FallibleIterator; +use crate::{CopyInWriter, CopyOutReader, Portal, RowIter, Statement, ToStatement}; use futures::executor; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; @@ -63,18 +61,14 @@ impl<'a> Transaction<'a> { } /// Like `Client::query_raw`. - pub fn query_raw<'b, T, I>( - &mut self, - query: &T, - params: I, - ) -> Result, Error> + pub fn query_raw<'b, T, I>(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, I: IntoIterator, I::IntoIter: ExactSizeIterator, { let stream = executor::block_on(self.0.query_raw(query, params))?; - Ok(Iter::new(stream)) + Ok(RowIter::new(stream)) } /// Binds parameters to a statement, creating a "portal". @@ -107,9 +101,9 @@ impl<'a> Transaction<'a> { &mut self, portal: &Portal, max_rows: i32, - ) -> Result, Error> { + ) -> Result, Error> { let stream = executor::block_on(self.0.query_portal_raw(portal, max_rows))?; - Ok(Iter::new(stream)) + Ok(RowIter::new(stream)) } /// Like `Client::copy_in`. From 299ef6c8dde78b3d5e84bbb2600258e3ea36bf8e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Nov 2019 16:17:23 -0500 Subject: [PATCH 320/819] Rename CopyStream to CopyOutStream --- postgres/src/copy_out_reader.rs | 6 +++--- tokio-postgres-binary-copy/src/lib.rs | 6 +++--- tokio-postgres/src/client.rs | 4 ++-- tokio-postgres/src/copy_out.rs | 8 ++++---- tokio-postgres/src/lib.rs | 2 +- tokio-postgres/src/transaction.rs | 4 ++-- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index f1309fb8a..cd9727252 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -3,11 +3,11 @@ use futures::executor; use std::io::{self, BufRead, Cursor, Read}; use std::marker::PhantomData; use std::pin::Pin; -use tokio_postgres::{CopyStream, Error}; +use tokio_postgres::{CopyOutStream, Error}; /// The reader returned by the `copy_out` method. 
pub struct CopyOutReader<'a> { - it: executor::BlockingStream>>, + it: executor::BlockingStream>>, cur: Cursor, _p: PhantomData<&'a mut ()>, } @@ -18,7 +18,7 @@ impl Drop for CopyOutReader<'_> { } impl<'a> CopyOutReader<'a> { - pub(crate) fn new(stream: CopyStream) -> Result, Error> { + pub(crate) fn new(stream: CopyOutStream) -> Result, Error> { let mut it = executor::block_on_stream(Box::pin(stream)); let cur = match it.next() { Some(Ok(cur)) => cur, diff --git a/tokio-postgres-binary-copy/src/lib.rs b/tokio-postgres-binary-copy/src/lib.rs index b90768b2e..467200742 100644 --- a/tokio-postgres-binary-copy/src/lib.rs +++ b/tokio-postgres-binary-copy/src/lib.rs @@ -8,7 +8,7 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use tokio_postgres::types::{IsNull, ToSql, Type, FromSql, WrongType}; -use tokio_postgres::{CopyStream, CopyInSink}; +use tokio_postgres::{CopyOutStream, CopyInSink}; use std::io::Cursor; use byteorder::{ByteOrder, BigEndian}; @@ -99,14 +99,14 @@ struct Header { pin_project! { pub struct BinaryCopyOutStream { #[pin] - stream: CopyStream, + stream: CopyOutStream, types: Arc>, header: Option
, } } impl BinaryCopyOutStream { - pub fn new(types: &[Type], stream: CopyStream) -> BinaryCopyOutStream { + pub fn new(types: &[Type], stream: CopyOutStream) -> BinaryCopyOutStream { BinaryCopyOutStream { stream, types: Arc::new(types.to_vec()), diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index bfc2579ff..e25cf7795 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -3,7 +3,7 @@ use crate::cancel_query; use crate::codec::BackendMessages; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; -use crate::copy_out::CopyStream; +use crate::copy_out::CopyOutStream; use crate::query::RowStream; use crate::simple_query::SimpleQueryStream; use crate::slice_iter; @@ -370,7 +370,7 @@ impl Client { &self, statement: &T, params: &[&(dyn ToSql + Sync)], - ) -> Result + ) -> Result where T: ?Sized + ToStatement, { diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 0c032208e..e4bb29e7c 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -15,14 +15,14 @@ pub async fn copy_out<'a, I>( client: &InnerClient, statement: Statement, params: I, -) -> Result +) -> Result where I: IntoIterator, I::IntoIter: ExactSizeIterator, { let buf = query::encode(client, &statement, params)?; let responses = start(client, buf).await?; - Ok(CopyStream { + Ok(CopyOutStream { responses, _p: PhantomPinned, }) @@ -46,14 +46,14 @@ async fn start(client: &InnerClient, buf: Bytes) -> Result { pin_project! { /// A stream of `COPY ... TO STDOUT` query data. - pub struct CopyStream { + pub struct CopyOutStream { responses: Responses, #[pin] _p: PhantomPinned, } } -impl Stream for CopyStream { +impl Stream for CopyOutStream { type Item = Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 61367f290..d1290ac49 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -106,7 +106,7 @@ pub use crate::client::Client; pub use crate::config::Config; pub use crate::connection::Connection; pub use crate::copy_in::CopyInSink; -pub use crate::copy_out::CopyStream; +pub use crate::copy_out::CopyOutStream; use crate::error::DbError; pub use crate::error::Error; pub use crate::portal::Portal; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index ac44a841b..4ff1c857a 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -1,6 +1,6 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::copy_out::CopyStream; +use crate::copy_out::CopyOutStream; use crate::query::RowStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; @@ -226,7 +226,7 @@ impl<'a> Transaction<'a> { &self, statement: &T, params: &[&(dyn ToSql + Sync)], - ) -> Result + ) -> Result where T: ?Sized + ToStatement, { From b4694471ad9b5c6200f41a5b74ad56bdb95150cb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Nov 2019 18:18:50 -0500 Subject: [PATCH 321/819] Add query_opt Closes #510 --- postgres-native-tls/src/lib.rs | 6 +-- postgres-openssl/src/lib.rs | 6 +-- postgres/src/client.rs | 48 +++++++++++++++++ postgres/src/transaction.rs | 12 +++++ tokio-postgres-binary-copy/src/lib.rs | 37 ++++++++----- tokio-postgres-binary-copy/src/test.rs | 75 ++++++++++++++++++++------ tokio-postgres/src/client.rs | 41 +++++++++++++- tokio-postgres/src/copy_in.rs | 4 +- tokio-postgres/src/transaction.rs | 14 ++++- 
tokio-postgres/tests/test/main.rs | 14 +++-- 10 files changed, 214 insertions(+), 43 deletions(-) diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index e10358d48..207ae6cb2 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -48,17 +48,17 @@ #![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use std::task::{Context, Poll}; +use bytes::{Buf, BufMut}; use std::future::Future; use std::io; +use std::mem::MaybeUninit; use std::pin::Pin; +use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; -use bytes::{Buf, BufMut}; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; -use std::mem::MaybeUninit; #[cfg(test)] mod test; diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 3a884ffb3..23a653c60 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -42,7 +42,7 @@ #![doc(html_root_url = "https://docs.rs/postgres-openssl/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use std::task::{Poll, Context}; +use bytes::{Buf, BufMut}; #[cfg(feature = "runtime")] use openssl::error::ErrorStack; use openssl::hash::MessageDigest; @@ -53,17 +53,17 @@ use openssl::ssl::{ConnectConfiguration, SslRef}; use std::fmt::Debug; use std::future::Future; use std::io; +use std::mem::MaybeUninit; use std::pin::Pin; #[cfg(feature = "runtime")] use std::sync::Arc; +use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; -use bytes::{Buf, BufMut}; use tokio_openssl::{HandshakeError, SslStream}; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; use tokio_postgres::tls::{ChannelBinding, TlsConnect}; -use std::mem::MaybeUninit; #[cfg(test)] mod test; diff --git a/postgres/src/client.rs b/postgres/src/client.rs index fad087fbf..0fcaa85e4 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -119,6 +119,8 @@ impl Client { /// Executes a statement which returns a single row, returning it. /// + /// Returns an error if the query does not return exactly one row. + /// /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list /// provided, 1-indexed. /// @@ -152,6 +154,52 @@ impl Client { executor::block_on(self.0.query_one(query, params)) } + /// Executes a statement which returns zero or one rows, returning it. + /// + /// Returns an error if the query returns more than one row. + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `query` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. 
+ /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, NoTls}; + /// + /// # fn main() -> Result<(), postgres::Error> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let baz = true; + /// let row = client.query_opt("SELECT foo FROM bar WHERE baz = $1", &[&baz])?; + /// match row { + /// Some(row) => { + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// } + /// None => println!("no matching foo"), + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn query_opt( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement, + { + executor::block_on(self.0.query_opt(query, params)) + } + /// A maximally-flexible version of `query`. /// /// It takes an iterator of parameters rather than a slice, and returns an iterator of rows rather than collecting diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index b42ba64ff..616a5872c 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -60,6 +60,18 @@ impl<'a> Transaction<'a> { executor::block_on(self.0.query_one(query, params)) } + /// Like `Client::query_opt`. + pub fn query_opt( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement, + { + executor::block_on(self.0.query_opt(query, params)) + } + /// Like `Client::query_raw`. pub fn query_raw<'b, T, I>(&mut self, query: &T, params: I) -> Result, Error> where diff --git a/tokio-postgres-binary-copy/src/lib.rs b/tokio-postgres-binary-copy/src/lib.rs index 467200742..6b627c71d 100644 --- a/tokio-postgres-binary-copy/src/lib.rs +++ b/tokio-postgres-binary-copy/src/lib.rs @@ -1,16 +1,16 @@ -use bytes::{BufMut, Bytes, BytesMut, Buf}; -use futures::{ready, Stream, SinkExt}; +use byteorder::{BigEndian, ByteOrder}; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use futures::{ready, SinkExt, Stream}; use pin_project_lite::pin_project; use std::convert::TryFrom; use std::error::Error; +use std::io::Cursor; use std::ops::Range; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use tokio_postgres::types::{IsNull, ToSql, Type, FromSql, WrongType}; -use tokio_postgres::{CopyOutStream, CopyInSink}; -use std::io::Cursor; -use byteorder::{ByteOrder, BigEndian}; +use tokio_postgres::types::{FromSql, IsNull, ToSql, Type, WrongType}; +use tokio_postgres::{CopyInSink, CopyOutStream}; #[cfg(test)] mod test; @@ -49,10 +49,13 @@ impl BinaryCopyInWriter { self.write_raw(values.iter().cloned()).await } - pub async fn write_raw<'a, I>(self: Pin<&mut Self>, values: I) -> Result<(), Box> - where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, + pub async fn write_raw<'a, I>( + self: Pin<&mut Self>, + values: I, + ) -> Result<(), Box> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, { let mut this = self.project(); @@ -126,7 +129,7 @@ impl Stream for BinaryCopyOutStream { Some(Err(e)) => return Poll::Ready(Some(Err(e.into()))), None => return Poll::Ready(Some(Err("unexpected EOF".into()))), }; - let mut chunk= Cursor::new(chunk); + let mut chunk = Cursor::new(chunk); let has_oids = match &this.header { Some(header) => header.has_oids, @@ -200,7 +203,10 @@ pub struct BinaryCopyOutRow { } impl BinaryCopyOutRow { - pub fn try_get<'a, T>(&'a self, idx: usize) -> Result> where T: FromSql<'a> { + pub fn try_get<'a, T>(&'a self, idx: usize) -> Result> + where + T: FromSql<'a>, + { let type_ = &self.types[idx]; if !T::accepts(type_) { return 
Err(WrongType::new::(type_.clone()).into()); @@ -208,11 +214,14 @@ impl BinaryCopyOutRow { match &self.ranges[idx] { Some(range) => T::from_sql(type_, &self.buf[range.clone()]).map_err(Into::into), - None => T::from_sql_null(type_).map_err(Into::into) + None => T::from_sql_null(type_).map_err(Into::into), } } - pub fn get<'a, T>(&'a self, idx: usize) -> T where T: FromSql<'a> { + pub fn get<'a, T>(&'a self, idx: usize) -> T + where + T: FromSql<'a>, + { match self.try_get(idx) { Ok(value) => value, Err(e) => panic!("error retrieving column {}: {}", idx, e), diff --git a/tokio-postgres-binary-copy/src/test.rs b/tokio-postgres-binary-copy/src/test.rs index 7d8bcd30c..d8d74ccba 100644 --- a/tokio-postgres-binary-copy/src/test.rs +++ b/tokio-postgres-binary-copy/src/test.rs @@ -1,7 +1,7 @@ use crate::{BinaryCopyInWriter, BinaryCopyOutStream}; +use futures::{pin_mut, TryStreamExt}; use tokio_postgres::types::Type; use tokio_postgres::{Client, NoTls}; -use futures::{TryStreamExt, pin_mut}; async fn connect() -> Client { let (client, connection) = @@ -23,11 +23,18 @@ async fn write_basic() { .await .unwrap(); - let sink = client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]).await.unwrap(); + let sink = client + .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]) + .await + .unwrap(); let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]); pin_mut!(writer); writer.as_mut().write(&[&1i32, &"foobar"]).await.unwrap(); - writer.as_mut().write(&[&2i32, &None::<&str>]).await.unwrap(); + writer + .as_mut() + .write(&[&2i32, &None::<&str>]) + .await + .unwrap(); writer.finish().await.unwrap(); let rows = client @@ -50,12 +57,19 @@ async fn write_many_rows() { .await .unwrap(); - let sink = client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]).await.unwrap(); + let sink = client + .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]) + .await + .unwrap(); let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]); pin_mut!(writer); for i in 0..10_000i32 { - writer.as_mut().write(&[&i, &format!("the value for {}", i)]).await.unwrap(); + writer + .as_mut() + .write(&[&i, &format!("the value for {}", i)]) + .await + .unwrap(); } writer.finish().await.unwrap(); @@ -79,12 +93,19 @@ async fn write_big_rows() { .await .unwrap(); - let sink = client.copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]).await.unwrap(); + let sink = client + .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]) + .await + .unwrap(); let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::BYTEA]); pin_mut!(writer); for i in 0..2i32 { - writer.as_mut().write(&[&i, &vec![i as u8; 128 * 1024]]).await.unwrap(); + writer + .as_mut() + .write(&[&i, &vec![i as u8; 128 * 1024]]) + .await + .unwrap(); } writer.finish().await.unwrap(); @@ -108,13 +129,19 @@ async fn read_basic() { " CREATE TEMPORARY TABLE foo (id INT, bar TEXT); INSERT INTO foo (id, bar) VALUES (1, 'foobar'), (2, NULL); - " + ", ) .await .unwrap(); - let stream = client.copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]).await.unwrap(); - let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream).try_collect::>().await.unwrap(); + let stream = client + .copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]) + .await + .unwrap(); + let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream) + .try_collect::>() + .await + .unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::(0), 1); @@ -136,8 +163,14 @@ async fn read_many_rows() { .await .unwrap(); - let stream = client.copy_out("COPY foo (id, bar) TO 
STDIN BINARY", &[]).await.unwrap(); - let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream).try_collect::>().await.unwrap(); + let stream = client + .copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]) + .await + .unwrap(); + let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream) + .try_collect::>() + .await + .unwrap(); assert_eq!(rows.len(), 10_000); for (i, row) in rows.iter().enumerate() { @@ -155,11 +188,23 @@ async fn read_big_rows() { .await .unwrap(); for i in 0..2i32 { - client.execute("INSERT INTO foo (id, bar) VALUES ($1, $2)", &[&i, &vec![i as u8; 128 * 1024]]).await.unwrap(); + client + .execute( + "INSERT INTO foo (id, bar) VALUES ($1, $2)", + &[&i, &vec![i as u8; 128 * 1024]], + ) + .await + .unwrap(); } - let stream = client.copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]).await.unwrap(); - let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::BYTEA], stream).try_collect::>().await.unwrap(); + let stream = client + .copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]) + .await + .unwrap(); + let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::BYTEA], stream) + .try_collect::>() + .await + .unwrap(); assert_eq!(rows.len(), 2); for (i, row) in rows.iter().enumerate() { diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index e25cf7795..fa0a4f7aa 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -231,6 +231,8 @@ impl Client { /// Executes a statement which returns a single row, returning it. /// + /// Returns an error if the query does not return exactly one row. + /// /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list /// provided, 1-indexed. /// @@ -238,8 +240,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// Returns an error if the query does not return exactly one row. - /// /// # Panics /// /// Panics if the number of parameters provided does not match the number expected. @@ -266,6 +266,43 @@ impl Client { Ok(row) } + /// Executes a statements which returns zero or one rows, returning it. + /// + /// Returns an error if the query returns more than one row. + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be + /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front + /// with the `prepare` method. + /// + /// # Panics + /// + /// Panics if the number of parameters provided does not match the number expected. + pub async fn query_opt( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement, + { + let stream = self.query_raw(statement, slice_iter(params)).await?; + pin_mut!(stream); + + let row = match stream.try_next().await? { + Some(row) => row, + None => return Ok(None), + }; + + if stream.try_next().await?.is_some() { + return Err(Error::row_count()); + } + + Ok(Some(row)) + } + /// The maximally flexible version of [`query`]. 
/// /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index b1cdae598..cd677758a 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -6,13 +6,13 @@ use crate::{query, Error, Statement}; use bytes::buf::BufExt; use bytes::{Buf, BufMut, BytesMut}; use futures::channel::mpsc; -use futures::{ready, Sink, SinkExt, Stream, StreamExt}; use futures::future; +use futures::{ready, Sink, SinkExt, Stream, StreamExt}; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use postgres_protocol::message::frontend::CopyData; -use std::marker::{PhantomPinned, PhantomData}; +use std::marker::{PhantomData, PhantomPinned}; use std::pin::Pin; use std::task::{Context, Poll}; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 4ff1c857a..1a3aae829 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -13,7 +13,7 @@ use crate::{ ToStatement, }; use bytes::Buf; -use futures::{TryStreamExt}; +use futures::TryStreamExt; use postgres_protocol::message::frontend; use tokio::io::{AsyncRead, AsyncWrite}; @@ -120,6 +120,18 @@ impl<'a> Transaction<'a> { self.client.query_one(statement, params).await } + /// Like `Client::query_opt`. + pub async fn query_opt( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.client.query_opt(statement, params).await + } + /// Like `Client::query_raw`. pub async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result where diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 4e6086f48..e510e9644 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -2,7 +2,9 @@ use bytes::{Bytes, BytesMut}; use futures::channel::mpsc; -use futures::{future, stream, StreamExt, SinkExt, pin_mut, join, try_join, FutureExt, TryStreamExt}; +use futures::{ + future, join, pin_mut, stream, try_join, FutureExt, SinkExt, StreamExt, TryStreamExt, +}; use std::fmt::Write; use std::time::Duration; use tokio::net::TcpStream; @@ -422,7 +424,10 @@ async fn copy_in() { let rows = sink.finish().await.unwrap(); assert_eq!(rows, 2); - let rows = client.query("SELECT id, name FROM foo ORDER BY id", &[]).await.unwrap(); + let rows = client + .query("SELECT id, name FROM foo ORDER BY id", &[]) + .await + .unwrap(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].get::<_, i32>(0), 1); @@ -487,7 +492,10 @@ async fn copy_in_error() { sink.send(Bytes::from_static(b"1\tsteven")).await.unwrap(); } - let rows = client.query("SELECT id, name FROM foo ORDER BY id", &[]).await.unwrap(); + let rows = client + .query("SELECT id, name FROM foo ORDER BY id", &[]) + .await + .unwrap(); assert_eq!(rows.len(), 0); } From a22f63b5bbf21bd584f300c581a92b2f9874c99a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 2 Dec 2019 17:55:08 -0800 Subject: [PATCH 322/819] Fix sqlstate generation --- codegen/Cargo.toml | 2 +- codegen/src/type_gen.rs | 2 +- tokio-postgres/src/error/sqlstate.rs | 553 ++++++++++++++------------- 3 files changed, 279 insertions(+), 278 deletions(-) diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index a536cbd61..8ff4d58be 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -5,6 +5,6 @@ authors = ["Steven Fackler "] [dependencies] phf_codegen = 
"0.8" -regex = "1.3.1" +regex = "1.0" marksman_escape = "0.1" linked-hash-map = "0.5" diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 1b3379aba..e7e81c8e7 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -200,7 +200,7 @@ fn parse_types() -> BTreeMap { let name = raw_type["typname"].clone(); let ident = range_vector_re.replace(&name, "_$1"); - let ident = array_re.replace(&ident, "$1_array"); + let ident = array_re.replace(&ident, "${1}_array"); let variant = snake_to_camel(&ident); let ident = ident.to_ascii_uppercase(); diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index b6aff0efe..013a26472 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -824,319 +824,320 @@ impl SqlState { pub const INDEX_CORRUPTED: SqlState = SqlState(Cow::Borrowed("XX002")); } #[rustfmt::skip] -static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { - key: 732231254413039614, +static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = +::phf::Map { + key: 3213172566270843353, disps: ::phf::Slice::Static(&[ - (0, 7), - (2, 136), - (0, 13), - (2, 32), - (0, 204), - (0, 46), - (0, 154), - (1, 221), - (0, 42), - (2, 246), - (0, 0), - (0, 1), - (1, 18), - (2, 7), + (3, 46), + (0, 6), + (0, 39), (0, 0), - (0, 1), - (0, 4), + (0, 192), + (0, 49), (0, 17), + (1, 138), (0, 2), - (0, 22), - (0, 93), - (1, 92), - (0, 145), + (0, 117), + (0, 0), + (0, 33), + (16, 241), + (0, 20), + (2, 148), + (0, 0), (0, 1), - (9, 81), - (0, 26), - (1, 53), - (0, 62), - (2, 173), - (0, 10), - (0, 3), - (0, 204), - (0, 26), - (0, 2), - (3, 80), - (1, 206), - (1, 61), - (0, 103), + (1, 3), + (0, 27), + (0, 21), + (1, 75), + (13, 187), (0, 3), - (25, 171), + (0, 42), + (0, 12), + (0, 82), + (3, 253), + (0, 219), + (0, 6), + (4, 206), + (2, 16), + (5, 67), + (3, 15), + (0, 76), + (0, 57), + (5, 203), + (22, 134), + (1, 27), + (0, 0), + (1, 113), (0, 0), (0, 0), - (4, 107), - (0, 129), - (0, 114), - (4, 201), - (20, 163), - (14, 72), - (0, 147), - (6, 113), - (5, 170), - (0, 23), + (5, 11), + (0, 45), + (0, 62), + (0, 26), + (1, 158), + (21, 1), + (0, 4), + (5, 64), + (0, 77), + (1, 189), ]), entries: ::phf::Slice::Static(&[ - ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("22012", SqlState::DIVISION_BY_ZERO), - ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), - ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("54011", SqlState::TOO_MANY_COLUMNS), - ("53100", SqlState::DISK_FULL), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("42P20", SqlState::WINDOWING_ERROR), - ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("42804", SqlState::DATATYPE_MISMATCH), - ("54001", SqlState::STATEMENT_TOO_COMPLEX), - ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), + ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("XX002", SqlState::INDEX_CORRUPTED), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + ("42P08", SqlState::AMBIGUOUS_PARAMETER), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), ("2202E", SqlState::ARRAY_ELEMENT_ERROR), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("28000", 
SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), + ("58P01", SqlState::UNDEFINED_FILE), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("P0001", SqlState::RAISE_EXCEPTION), + ("08P01", SqlState::PROTOCOL_VIOLATION), ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("XX001", SqlState::DATA_CORRUPTED), - ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("42701", SqlState::DUPLICATE_COLUMN), + ("HV014", SqlState::FDW_TOO_MANY_HANDLES), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("53200", SqlState::OUT_OF_MEMORY), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("25000", SqlState::INVALID_TRANSACTION_STATE), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("42602", SqlState::INVALID_NAME), + ("08000", SqlState::CONNECTION_EXCEPTION), + ("57P03", SqlState::CANNOT_CONNECT_NOW), ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), - ("42601", SqlState::SYNTAX_ERROR), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("22022", SqlState::INDICATOR_OVERFLOW), - ("42P22", SqlState::INDETERMINATE_COLLATION), - ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("42712", SqlState::DUPLICATE_ALIAS), - ("28P01", SqlState::INVALID_PASSWORD), + ("3B001", SqlState::S_E_INVALID_SPECIFICATION), + ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), + ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), + ("P0003", SqlState::TOO_MANY_ROWS), + ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), + ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), + ("23505", SqlState::UNIQUE_VIOLATION), + ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("22019", SqlState::INVALID_ESCAPE_CHARACTER), + ("2200S", SqlState::INVALID_XML_COMMENT), + ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), + ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), + ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), + ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), + ("34000", SqlState::INVALID_CURSOR_NAME), + ("57P02", SqlState::CRASH_SHUTDOWN), + ("22012", SqlState::DIVISION_BY_ZERO), + ("42723", SqlState::DUPLICATE_FUNCTION), + ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("57000", SqlState::OPERATOR_INTERVENTION), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), + ("HV00P", SqlState::FDW_NO_SCHEMAS), + ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), + ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("23001", SqlState::RESTRICT_VIOLATION), + ("23514", SqlState::CHECK_VIOLATION), + ("42939", SqlState::RESERVED_NAME), ("25004", 
SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), - ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("HV00K", SqlState::FDW_REPLY_HANDLE), ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("55006", SqlState::OBJECT_IN_USE), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), - ("HV00B", SqlState::FDW_INVALID_HANDLE), + ("53100", SqlState::DISK_FULL), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("53200", SqlState::OUT_OF_MEMORY), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), - ("428C9", SqlState::GENERATED_ALWAYS), - ("57P04", SqlState::DATABASE_DROPPED), - ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), - ("42P18", SqlState::INDETERMINATE_DATATYPE), - ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), - ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("P0002", SqlState::NO_DATA_FOUND), - ("42723", SqlState::DUPLICATE_FUNCTION), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("42P03", SqlState::DUPLICATE_CURSOR), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("58000", SqlState::SYSTEM_ERROR), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("02000", SqlState::NO_DATA), - ("22027", SqlState::TRIM_ERROR), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), + ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("57P02", SqlState::CRASH_SHUTDOWN), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("2200N", SqlState::INVALID_XML_CONTENT), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("08P01", SqlState::PROTOCOL_VIOLATION), - ("42602", SqlState::INVALID_NAME), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), - ("58P02", SqlState::DUPLICATE_FILE), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), - ("42P07", SqlState::DUPLICATE_TABLE), - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("22035", SqlState::NO_SQL_JSON_ITEM), - ("22011", SqlState::SUBSTRING_ERROR), ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("57000", 
SqlState::OPERATOR_INTERVENTION), - ("53000", SqlState::INSUFFICIENT_RESOURCES), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), + ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), + ("2200L", SqlState::NOT_AN_XML_DOCUMENT), + ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("XX000", SqlState::INTERNAL_ERROR), - ("42703", SqlState::UNDEFINED_COLUMN), - ("42704", SqlState::UNDEFINED_OBJECT), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), - ("P0003", SqlState::TOO_MANY_ROWS), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("08006", SqlState::CONNECTION_FAILURE), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("22007", SqlState::INVALID_DATETIME_FORMAT), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("2200D", SqlState::INVALID_ESCAPE_OCTET), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), + ("22005", SqlState::ERROR_IN_ASSIGNMENT), + ("P0002", SqlState::NO_DATA_FOUND), + ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), + ("58030", SqlState::IO_ERROR), ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("F0001", SqlState::LOCK_FILE_EXISTS), + ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("01000", SqlState::WARNING), + ("22032", SqlState::INVALID_JSON_TEXT), + ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), + ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), + ("42846", SqlState::CANNOT_COERCE), + ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("57P01", SqlState::ADMIN_SHUTDOWN), + ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), + ("42P19", SqlState::INVALID_RECURSION), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("42804", SqlState::DATATYPE_MISMATCH), + ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("0A000", SqlState::FEATURE_NOT_SUPPORTED), ("0F000", SqlState::LOCATOR_EXCEPTION), - ("2200M", SqlState::INVALID_XML_DOCUMENT), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("00000", SqlState::SUCCESSFUL_COMPLETION), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("22023", SqlState::INVALID_PARAMETER_VALUE), - ("HV090", 
SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), ("42710", SqlState::DUPLICATE_OBJECT), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), + ("P0004", SqlState::ASSERT_FAILURE), + ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), + ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("XX000", SqlState::INTERNAL_ERROR), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), + ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), - ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), - ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), + ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("P0004", SqlState::ASSERT_FAILURE), + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("21000", SqlState::CARDINALITY_VIOLATION), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), + ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("42P12", SqlState::INVALID_DATABASE_DEFINITION), + ("42704", SqlState::UNDEFINED_OBJECT), + ("42P04", SqlState::DUPLICATE_DATABASE), + ("HV000", SqlState::FDW_ERROR), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), + ("42701", SqlState::DUPLICATE_COLUMN), + ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("XX001", SqlState::DATA_CORRUPTED), + ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), + ("58P02", SqlState::DUPLICATE_FILE), + ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("HV00B", SqlState::FDW_INVALID_HANDLE), + ("54011", SqlState::TOO_MANY_COLUMNS), + ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), + ("23P01", SqlState::EXCLUSION_VIOLATION), ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), + ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), + ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), + ("22035", SqlState::NO_SQL_JSON_ITEM), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("3F000", SqlState::INVALID_SCHEMA_NAME), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), + ("42830", SqlState::INVALID_FOREIGN_KEY), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), + ("54023", SqlState::TOO_MANY_ARGUMENTS), + ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), + ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("428C9", SqlState::GENERATED_ALWAYS), + ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("42P09", SqlState::AMBIGUOUS_ALIAS), + ("08006", SqlState::CONNECTION_FAILURE), + ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), + ("54001", SqlState::STATEMENT_TOO_COMPLEX), + ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), + ("23502", 
SqlState::NOT_NULL_VIOLATION), + ("22008", SqlState::DATETIME_FIELD_OVERFLOW), + ("F0000", SqlState::CONFIG_FILE_ERROR), ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("P0001", SqlState::RAISE_EXCEPTION), - ("23514", SqlState::CHECK_VIOLATION), - ("57P03", SqlState::CANNOT_CONNECT_NOW), - ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("58P01", SqlState::UNDEFINED_FILE), - ("42P01", SqlState::UNDEFINED_TABLE), - ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), + ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), + ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), + ("57014", SqlState::QUERY_CANCELED), ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), - ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("58030", SqlState::IO_ERROR), + ("40000", SqlState::TRANSACTION_ROLLBACK), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), - ("42846", SqlState::CANNOT_COERCE), - ("42P12", SqlState::INVALID_DATABASE_DEFINITION), - ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), - ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("P0000", SqlState::PLPGSQL_ERROR), - ("23P01", SqlState::EXCLUSION_VIOLATION), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), - ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), - ("F0000", SqlState::CONFIG_FILE_ERROR), + ("02000", SqlState::NO_DATA), + ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), ("3D000", SqlState::INVALID_CATALOG_NAME), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("HV000", SqlState::FDW_ERROR), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("42803", SqlState::GROUPING_ERROR), - ("22000", SqlState::DATA_EXCEPTION), - ("23505", SqlState::UNIQUE_VIOLATION), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("42939", SqlState::RESERVED_NAME), + ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("42611", SqlState::INVALID_COLUMN_DEFINITION), ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("57014", SqlState::QUERY_CANCELED), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), - ("23001", SqlState::RESTRICT_VIOLATION), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("42883", SqlState::UNDEFINED_FUNCTION), + ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("22027", SqlState::TRIM_ERROR), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), ("0L000", SqlState::INVALID_GRANTOR), - ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), - ("42P04", SqlState::DUPLICATE_DATABASE), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("40000", 
SqlState::TRANSACTION_ROLLBACK), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), - ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("XX002", SqlState::INDEX_CORRUPTED), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("42725", SqlState::AMBIGUOUS_FUNCTION), + ("42601", SqlState::SYNTAX_ERROR), + ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), + ("42702", SqlState::AMBIGUOUS_COLUMN), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), ("42P16", SqlState::INVALID_TABLE_DEFINITION), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("23502", SqlState::NOT_NULL_VIOLATION), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), + ("55006", SqlState::OBJECT_IN_USE), + ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), + ("42803", SqlState::GROUPING_ERROR), + ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), + ("42P02", SqlState::UNDEFINED_PARAMETER), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("42P22", SqlState::INDETERMINATE_COLLATION), + ("0F001", SqlState::L_E_INVALID_SPECIFICATION), + ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + ("42P07", SqlState::DUPLICATE_TABLE), + ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), + ("58000", SqlState::SYSTEM_ERROR), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("42622", SqlState::NAME_TOO_LONG), + ("20000", SqlState::CASE_NOT_FOUND), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("22022", SqlState::INDICATOR_OVERFLOW), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), + ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), + ("2200N", SqlState::INVALID_XML_CONTENT), + ("42P01", SqlState::UNDEFINED_TABLE), + ("42P03", SqlState::DUPLICATE_CURSOR), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), + ("22000", SqlState::DATA_EXCEPTION), + ("28P01", SqlState::INVALID_PASSWORD), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), + ("42P06", SqlState::DUPLICATE_SCHEMA), + ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), + ("55P03", SqlState::LOCK_NOT_AVAILABLE), + ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), + ("42712", SqlState::DUPLICATE_ALIAS), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), + ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), + ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), + ("42P21", SqlState::COLLATION_MISMATCH), + ("42703", SqlState::UNDEFINED_COLUMN), + ("57P04", SqlState::DATABASE_DROPPED), ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("57P01", SqlState::ADMIN_SHUTDOWN), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("01000", SqlState::WARNING), + ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), + ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), 
("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("22032", SqlState::INVALID_JSON_TEXT), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("42809", SqlState::WRONG_OBJECT_TYPE), ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), - ("42P21", SqlState::COLLATION_MISMATCH), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), - ("42P19", SqlState::INVALID_RECURSION), - ("2200S", SqlState::INVALID_XML_COMMENT), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("42622", SqlState::NAME_TOO_LONG), - ("20000", SqlState::CASE_NOT_FOUND), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), - ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), + ("42P20", SqlState::WINDOWING_ERROR), + ("24000", SqlState::INVALID_CURSOR_STATE), + ("22011", SqlState::SUBSTRING_ERROR), + ("00000", SqlState::SUCCESSFUL_COMPLETION), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("P0000", SqlState::PLPGSQL_ERROR), + ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), ]), }; From edd9a5aa7f71a209de8964d138eaaf2b0b4ce9e2 Mon Sep 17 00:00:00 2001 From: "Michael P. Jung" Date: Mon, 18 Nov 2019 15:57:09 +0100 Subject: [PATCH 323/819] Add Hash implementation to Type --- codegen/src/type_gen.rs | 4 ++-- postgres-types/src/lib.rs | 6 +++--- postgres-types/src/type_gen.rs | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index e7e81c8e7..99a38ce87 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -266,7 +266,7 @@ use std::sync::Arc; use crate::{{Type, Oid, Kind}}; -#[derive(PartialEq, Eq, Debug)] +#[derive(PartialEq, Eq, Debug, Hash)] pub struct Other {{ pub name: String, pub oid: Oid, @@ -282,7 +282,7 @@ fn make_enum(w: &mut BufWriter, types: &BTreeMap) { write!( w, " -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(PartialEq, Eq, Clone, Debug, Hash)] pub enum Inner {{" ) .unwrap(); diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 47895a0e4..db4aef4c6 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -209,7 +209,7 @@ mod special; mod type_gen; /// A Postgres type. -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(PartialEq, Eq, Clone, Debug, Hash)] pub struct Type(Inner); impl fmt::Display for Type { @@ -264,7 +264,7 @@ impl Type { } /// Represents the kind of a Postgres type. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum Kind { /// A simple type like `VARCHAR` or `INTEGER`. Simple, @@ -285,7 +285,7 @@ pub enum Kind { } /// Information about a field of a composite type. 
-#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Field { name: String, type_: Type, diff --git a/postgres-types/src/type_gen.rs b/postgres-types/src/type_gen.rs index 0ff5a2192..8156ccfa5 100644 --- a/postgres-types/src/type_gen.rs +++ b/postgres-types/src/type_gen.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::{Kind, Oid, Type}; -#[derive(PartialEq, Eq, Debug)] +#[derive(PartialEq, Eq, Debug, Hash)] pub struct Other { pub name: String, pub oid: Oid, @@ -11,7 +11,7 @@ pub struct Other { pub schema: String, } -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(PartialEq, Eq, Clone, Debug, Hash)] pub enum Inner { Bool, Bytea, From b425a28b2f241a43de8180881cf95304d52f648a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 3 Dec 2019 15:26:24 -0800 Subject: [PATCH 324/819] Reenable clippy and rustfmt in CI --- .circleci/config.yml | 6 +++--- postgres/src/config.rs | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 875ab4ffb..a949ef4c4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -28,14 +28,14 @@ jobs: - image: sfackler/rust-postgres-test:5 steps: - checkout -# - run: rustup component add rustfmt clippy + - run: rustup component add rustfmt clippy - *RESTORE_REGISTRY - run: cargo generate-lockfile - *SAVE_REGISTRY - run: rustc --version > ~/rust-version - *RESTORE_DEPS -# - run: cargo fmt --all -- --check -# - run: cargo clippy --all --all-targets --all-features + - run: cargo fmt --all -- --check + - run: cargo clippy --all --all-targets --all-features - run: cargo test --all - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 34de68e47..34a29fe69 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -17,6 +17,8 @@ pub use tokio_postgres::config::{ChannelBinding, SslMode, TargetSessionAttrs}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; +type Spawn = dyn Fn(Pin + Send>>) + Sync + Send; + /// Connection configuration. /// /// Configuration can be parsed from libpq-style connection strings. These strings come in two formats: @@ -93,7 +95,7 @@ use tokio_postgres::{Error, Socket}; #[derive(Clone)] pub struct Config { config: tokio_postgres::Config, - spawner: Option + Send>>) + Sync + Send>>, + spawner: Option>, } impl fmt::Debug for Config { From d6163c088f084b4be8318136c89a03da29687ae6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 3 Dec 2019 15:26:52 -0800 Subject: [PATCH 325/819] Switch off nightly in CI --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a949ef4c4..cee94d9ff 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ version: 2 jobs: build: docker: - - image: rustlang/rust:nightly + - image: rust:1.39.0 environment: RUSTFLAGS: -D warnings - image: sfackler/rust-postgres-test:5 From 09a63d62553da1625c0f8385ab15e815b78d587d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 3 Dec 2019 18:25:29 -0800 Subject: [PATCH 326/819] Move to local runtimes per connection This avoids a bunch of context switches and cross-thread synchronization, which ends up improving the performance of a simple query by ~20%, from 252us to 216us. 
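This change swaps the shared global Tokio runtime for a single-threaded runtime owned by each blocking Client. A minimal sketch of the pattern, assuming the tokio 0.2 and tokio-postgres 0.5 APIs used elsewhere in this series (names and error handling are illustrative only, not the exact code in postgres/src/config.rs):

    use tokio::runtime;
    use tokio_postgres::{Config, Error, NoTls};

    fn connect_blocking(config: &Config) -> Result<(), Error> {
        // One current-thread ("basic") scheduler per connection: no
        // cross-thread handoff between the blocking wrapper and the driver.
        let mut runtime = runtime::Builder::new()
            .enable_all()
            .basic_scheduler()
            .build()
            .unwrap();

        let (client, connection) = runtime.block_on(config.connect(NoTls))?;

        // The connection future is spawned onto the same local runtime, so it
        // is only driven while the client is blocked on a call below.
        runtime.spawn(async move {
            if let Err(e) = connection.await {
                eprintln!("connection error: {}", e);
            }
        });

        // Every synchronous method then reduces to a block_on of the async client.
        let _rows = runtime.block_on(client.query("SELECT 1", &[]))?;
        Ok(())
    }

The benchmark added in postgres/benches/bench.rs measures the effect of this change (spawned: roughly 249-255us, local: roughly 214-219us per prepared query).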
--- postgres/Cargo.toml | 15 +++--- postgres/benches/bench.rs | 17 +++++++ postgres/src/client.rs | 82 +++++++++++++-------------------- postgres/src/config.rs | 63 ++++++------------------- postgres/src/copy_in_writer.rs | 15 +++--- postgres/src/copy_out_reader.rs | 23 +++++---- postgres/src/lib.rs | 22 +-------- postgres/src/row_iter.rs | 16 +++---- postgres/src/transaction.rs | 77 ++++++++++++++++++++----------- 9 files changed, 151 insertions(+), 179 deletions(-) create mode 100644 postgres/benches/bench.rs diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index cab09cea6..e766aa8ee 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -10,6 +10,10 @@ readme = "../README.md" keywords = ["database", "postgres", "postgresql", "sql"] categories = ["database"] +[[bench]] +name = "bench" +harness = false + [package.metadata.docs.rs] all-features = true @@ -17,9 +21,6 @@ all-features = true circle-ci = { repository = "sfackler/rust-postgres" } [features] -default = ["runtime"] -runtime = ["tokio-postgres/runtime", "tokio", "lazy_static", "log"] - with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] @@ -32,11 +33,11 @@ with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres" } -tokio = { version = "0.2", optional = true, features = ["rt-threaded"] } -lazy_static = { version = "1.0", optional = true } -log = { version = "0.4", optional = true } +tokio = { version = "0.2", features = ["rt-core"] } +log = "0.4" [dev-dependencies] +criterion = "0.3" tokio = "0.2" diff --git a/postgres/benches/bench.rs b/postgres/benches/bench.rs new file mode 100644 index 000000000..474d83591 --- /dev/null +++ b/postgres/benches/bench.rs @@ -0,0 +1,17 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use postgres::{Client, NoTls}; + +// spawned: 249us 252us 255us +// local: 214us 216us 219us +fn query_prepared(c: &mut Criterion) { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + let stmt = client.prepare("SELECT $1::INT8").unwrap(); + + c.bench_function("query_prepared", move |b| { + b.iter(|| client.query(&stmt, &[&1i64]).unwrap()) + }); +} + +criterion_group!(group, query_prepared); +criterion_main!(group); diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 0fcaa85e4..6db4cfa42 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,27 +1,25 @@ -#[cfg(feature = "runtime")] -use crate::Config; -use crate::{CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction}; -use futures::executor; +use crate::{Config, CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction}; +use tokio::runtime::Runtime; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; -#[cfg(feature = "runtime")] -use tokio_postgres::Socket; -use tokio_postgres::{Error, Row, SimpleQueryMessage}; +use tokio_postgres::{Error, Row, SimpleQueryMessage, Socket}; /// A synchronous PostgreSQL client. -/// -/// This is a lightweight wrapper over the asynchronous tokio_postgres `Client`. 
-pub struct Client(tokio_postgres::Client); +pub struct Client { + runtime: Runtime, + client: tokio_postgres::Client, +} impl Client { + pub(crate) fn new(runtime: Runtime, client: tokio_postgres::Client) -> Client { + Client { runtime, client } + } + /// A convenience function which parses a configuration string into a `Config` and then connects to the database. /// /// See the documentation for [`Config`] for information about the connection syntax. /// - /// Requires the `runtime` Cargo feature (enabled by default). - /// /// [`Config`]: config/struct.Config.html - #[cfg(feature = "runtime")] pub fn connect(params: &str, tls_mode: T) -> Result where T: MakeTlsConnect + 'static + Send, @@ -78,7 +76,7 @@ impl Client { where T: ?Sized + ToStatement, { - executor::block_on(self.0.execute(query, params)) + self.runtime.block_on(self.client.execute(query, params)) } /// Executes a statement, returning the resulting rows. @@ -114,7 +112,7 @@ impl Client { where T: ?Sized + ToStatement, { - executor::block_on(self.0.query(query, params)) + self.runtime.block_on(self.client.query(query, params)) } /// Executes a statement which returns a single row, returning it. @@ -151,7 +149,7 @@ impl Client { where T: ?Sized + ToStatement, { - executor::block_on(self.0.query_one(query, params)) + self.runtime.block_on(self.client.query_one(query, params)) } /// Executes a statement which returns zero or one rows, returning it. @@ -197,7 +195,7 @@ impl Client { where T: ?Sized + ToStatement, { - executor::block_on(self.0.query_opt(query, params)) + self.runtime.block_on(self.client.query_opt(query, params)) } /// A maximally-flexible version of `query`. @@ -235,8 +233,10 @@ impl Client { I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let stream = executor::block_on(self.0.query_raw(query, params))?; - Ok(RowIter::new(stream)) + let stream = self + .runtime + .block_on(self.client.query_raw(query, params))?; + Ok(RowIter::new(&mut self.runtime, stream)) } /// Creates a new prepared statement. @@ -263,7 +263,7 @@ impl Client { /// # } /// ``` pub fn prepare(&mut self, query: &str) -> Result { - executor::block_on(self.0.prepare(query)) + self.runtime.block_on(self.client.prepare(query)) } /// Like `prepare`, but allows the types of query parameters to be explicitly specified. @@ -294,7 +294,8 @@ impl Client { /// # } /// ``` pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - executor::block_on(self.0.prepare_typed(query, types)) + self.runtime + .block_on(self.client.prepare_typed(query, types)) } /// Executes a `COPY FROM STDIN` statement, returning the number of rows created. @@ -327,8 +328,8 @@ impl Client { where T: ?Sized + ToStatement, { - let sink = executor::block_on(self.0.copy_in(query, params))?; - Ok(CopyInWriter::new(sink)) + let sink = self.runtime.block_on(self.client.copy_in(query, params))?; + Ok(CopyInWriter::new(&mut self.runtime, sink)) } /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data. @@ -358,8 +359,8 @@ impl Client { where T: ?Sized + ToStatement, { - let stream = executor::block_on(self.0.copy_out(query, params))?; - CopyOutReader::new(stream) + let stream = self.runtime.block_on(self.client.copy_out(query, params))?; + CopyOutReader::new(&mut self.runtime, stream) } /// Executes a sequence of SQL statements using the simple query protocol. @@ -378,7 +379,7 @@ impl Client { /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass /// them to this method! 
pub fn simple_query(&mut self, query: &str) -> Result, Error> { - executor::block_on(self.0.simple_query(query)) + self.runtime.block_on(self.client.simple_query(query)) } /// Executes a sequence of SQL statements using the simple query protocol. @@ -392,7 +393,7 @@ impl Client { /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { - executor::block_on(self.0.batch_execute(query)) + self.runtime.block_on(self.client.batch_execute(query)) } /// Begins a new database transaction. @@ -416,35 +417,14 @@ impl Client { /// # } /// ``` pub fn transaction(&mut self) -> Result, Error> { - let transaction = executor::block_on(self.0.transaction())?; - Ok(Transaction::new(transaction)) + let transaction = self.runtime.block_on(self.client.transaction())?; + Ok(Transaction::new(&mut self.runtime, transaction)) } /// Determines if the client's connection has already closed. /// /// If this returns `true`, the client is no longer usable. pub fn is_closed(&self) -> bool { - self.0.is_closed() - } - - /// Returns a shared reference to the inner nonblocking client. - pub fn get_ref(&self) -> &tokio_postgres::Client { - &self.0 - } - - /// Returns a mutable reference to the inner nonblocking client. - pub fn get_mut(&mut self) -> &mut tokio_postgres::Client { - &mut self.0 - } - - /// Consumes the client, returning the inner nonblocking client. - pub fn into_inner(self) -> tokio_postgres::Client { - self.0 - } -} - -impl From for Client { - fn from(c: tokio_postgres::Client) -> Client { - Client(c) + self.client.is_closed() } } diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 34a29fe69..d50bd024d 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -2,23 +2,19 @@ //! //! Requires the `runtime` Cargo feature (enabled by default). -use crate::{Client, RUNTIME}; -use futures::{executor, FutureExt}; +use crate::Client; +use futures::FutureExt; use log::error; use std::fmt; -use std::future::Future; use std::path::Path; -use std::pin::Pin; use std::str::FromStr; -use std::sync::{mpsc, Arc}; use std::time::Duration; +use tokio::runtime; #[doc(inline)] pub use tokio_postgres::config::{ChannelBinding, SslMode, TargetSessionAttrs}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; -type Spawn = dyn Fn(Pin + Send>>) + Sync + Send; - /// Connection configuration. /// /// Configuration can be parsed from libpq-style connection strings. These strings come in two formats: @@ -95,7 +91,6 @@ type Spawn = dyn Fn(Pin + Send>>) + Sync + Send; #[derive(Clone)] pub struct Config { config: tokio_postgres::Config, - spawner: Option>, } impl fmt::Debug for Config { @@ -117,7 +112,6 @@ impl Config { pub fn new() -> Config { Config { config: tokio_postgres::Config::new(), - spawner: None, } } @@ -242,17 +236,6 @@ impl Config { self } - /// Sets the spawner used to run the connection futures. - /// - /// Defaults to a postgres-specific tokio `Runtime`. - pub fn spawner(&mut self, spawn: F) -> &mut Config - where - F: Fn(Pin + Send>>) + 'static + Sync + Send, - { - self.spawner = Some(Arc::new(spawn)); - self - } - /// Opens a connection to a PostgreSQL database. 
pub fn connect(&self, tls: T) -> Result where @@ -261,38 +244,23 @@ impl Config { T::Stream: Send, >::Future: Send, { - let (client, connection) = match &self.spawner { - Some(spawn) => { - let (tx, rx) = mpsc::channel(); - let config = self.config.clone(); - let connect = async move { - let r = config.connect(tls).await; - let _ = tx.send(r); - }; - spawn(Box::pin(connect)); - rx.recv().unwrap()? - } - None => { - let connect = self.config.connect(tls); - RUNTIME.handle().enter(|| executor::block_on(connect))? - } - }; + let mut runtime = runtime::Builder::new() + .enable_all() + .basic_scheduler() + .build() + .unwrap(); // FIXME don't unwrap + + let (client, connection) = runtime.block_on(self.config.connect(tls))?; + // FIXME don't spawn this so error reporting is less weird. let connection = connection.map(|r| { if let Err(e) = r { error!("postgres connection error: {}", e) } }); - match &self.spawner { - Some(spawn) => { - spawn(Box::pin(connection)); - } - None => { - RUNTIME.spawn(connection); - } - } + runtime.spawn(connection); - Ok(Client::from(client)) + Ok(Client::new(runtime, client)) } } @@ -306,9 +274,6 @@ impl FromStr for Config { impl From for Config { fn from(config: tokio_postgres::Config) -> Config { - Config { - config, - spawner: None, - } + Config { config } } } diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs index b7a2a009e..897d87567 100644 --- a/postgres/src/copy_in_writer.rs +++ b/postgres/src/copy_in_writer.rs @@ -1,18 +1,18 @@ use bytes::{Bytes, BytesMut}; -use futures::{executor, SinkExt}; +use futures::SinkExt; use std::io; use std::io::Write; -use std::marker::PhantomData; use std::pin::Pin; +use tokio::runtime::Runtime; use tokio_postgres::{CopyInSink, Error}; /// The writer returned by the `copy_in` method. /// /// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. pub struct CopyInWriter<'a> { + runtime: &'a mut Runtime, sink: Pin>>, buf: BytesMut, - _p: PhantomData<&'a mut ()>, } // no-op impl to extend borrow until drop @@ -21,11 +21,11 @@ impl Drop for CopyInWriter<'_> { } impl<'a> CopyInWriter<'a> { - pub(crate) fn new(sink: CopyInSink) -> CopyInWriter<'a> { + pub(crate) fn new(runtime: &'a mut Runtime, sink: CopyInSink) -> CopyInWriter<'a> { CopyInWriter { + runtime, sink: Box::pin(sink), buf: BytesMut::new(), - _p: PhantomData, } } @@ -34,7 +34,7 @@ impl<'a> CopyInWriter<'a> { /// If this is not called, the copy will be aborted. pub fn finish(mut self) -> Result { self.flush_inner()?; - executor::block_on(self.sink.as_mut().finish()) + self.runtime.block_on(self.sink.as_mut().finish()) } fn flush_inner(&mut self) -> Result<(), Error> { @@ -42,7 +42,8 @@ impl<'a> CopyInWriter<'a> { return Ok(()); } - executor::block_on(self.sink.as_mut().send(self.buf.split().freeze())) + self.runtime + .block_on(self.sink.as_mut().send(self.buf.split().freeze())) } } diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index cd9727252..14f8d6302 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -1,15 +1,15 @@ use bytes::{Buf, Bytes}; -use futures::executor; +use futures::StreamExt; use std::io::{self, BufRead, Cursor, Read}; -use std::marker::PhantomData; use std::pin::Pin; +use tokio::runtime::Runtime; use tokio_postgres::{CopyOutStream, Error}; /// The reader returned by the `copy_out` method. 
pub struct CopyOutReader<'a> { - it: executor::BlockingStream>>, + runtime: &'a mut Runtime, + stream: Pin>, cur: Cursor, - _p: PhantomData<&'a mut ()>, } // no-op impl to extend borrow until drop @@ -18,18 +18,21 @@ impl Drop for CopyOutReader<'_> { } impl<'a> CopyOutReader<'a> { - pub(crate) fn new(stream: CopyOutStream) -> Result, Error> { - let mut it = executor::block_on_stream(Box::pin(stream)); - let cur = match it.next() { + pub(crate) fn new( + runtime: &'a mut Runtime, + stream: CopyOutStream, + ) -> Result, Error> { + let mut stream = Box::pin(stream); + let cur = match runtime.block_on(stream.next()) { Some(Ok(cur)) => cur, Some(Err(e)) => return Err(e), None => Bytes::new(), }; Ok(CopyOutReader { - it, + runtime, + stream, cur: Cursor::new(cur), - _p: PhantomData, }) } } @@ -47,7 +50,7 @@ impl Read for CopyOutReader<'_> { impl BufRead for CopyOutReader<'_> { fn fill_buf(&mut self) -> io::Result<&[u8]> { if self.cur.remaining() == 0 { - match self.it.next() { + match self.runtime.block_on(self.stream.next()) { Some(Ok(cur)) => self.cur = Cursor::new(cur), Some(Err(e)) => return Err(io::Error::new(io::ErrorKind::Other, e)), None => {} diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 13c9a21d8..bfe187647 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -55,19 +55,11 @@ #![doc(html_root_url = "https://docs.rs/postgres/0.17")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] -#[cfg(feature = "runtime")] -use lazy_static::lazy_static; -#[cfg(feature = "runtime")] -use tokio::runtime::{self, Runtime}; - -#[cfg(feature = "runtime")] -pub use tokio_postgres::Socket; pub use tokio_postgres::{ - error, row, tls, types, Column, Portal, SimpleQueryMessage, Statement, ToStatement, + error, row, tls, types, Column, Portal, SimpleQueryMessage, Socket, Statement, ToStatement, }; pub use crate::client::*; -#[cfg(feature = "runtime")] pub use crate::config::Config; pub use crate::copy_in_writer::CopyInWriter; pub use crate::copy_out_reader::CopyOutReader; @@ -81,23 +73,11 @@ pub use crate::tls::NoTls; pub use crate::transaction::*; mod client; -#[cfg(feature = "runtime")] pub mod config; mod copy_in_writer; mod copy_out_reader; mod row_iter; mod transaction; -#[cfg(feature = "runtime")] #[cfg(test)] mod test; - -#[cfg(feature = "runtime")] -lazy_static! { - static ref RUNTIME: Runtime = runtime::Builder::new() - .thread_name("postgres") - .threaded_scheduler() - .enable_all() - .build() - .unwrap(); -} diff --git a/postgres/src/row_iter.rs b/postgres/src/row_iter.rs index 08da68739..150b5514e 100644 --- a/postgres/src/row_iter.rs +++ b/postgres/src/row_iter.rs @@ -1,13 +1,13 @@ use fallible_iterator::FallibleIterator; -use futures::executor::{self, BlockingStream}; -use std::marker::PhantomData; use std::pin::Pin; +use tokio::runtime::Runtime; use tokio_postgres::{Error, Row, RowStream}; +use futures::StreamExt; /// The iterator returned by `query_raw`. 
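A minimal sketch, not part of the patch, of the pattern the blocking readers and iterators now share: pin the async stream once, then block on the borrowed runtime for each item as the synchronous caller asks for it.

```rust
use futures::{Stream, StreamExt};
use std::pin::Pin;
use tokio::runtime::Runtime;

// Illustrative helper only: drive one item of an async stream to completion
// on a tokio 0.2 runtime, as CopyOutReader::fill_buf and RowIter::next do.
fn blocking_next<S>(runtime: &mut Runtime, stream: &mut Pin<Box<S>>) -> Option<S::Item>
where
    S: Stream,
{
    runtime.block_on(stream.next())
}
```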
pub struct RowIter<'a> { - it: BlockingStream>>, - _p: PhantomData<&'a mut ()>, + runtime: &'a mut Runtime, + it: Pin>, } // no-op impl to extend the borrow until drop @@ -16,10 +16,10 @@ impl Drop for RowIter<'_> { } impl<'a> RowIter<'a> { - pub(crate) fn new(stream: RowStream) -> RowIter<'a> { + pub(crate) fn new(runtime: &'a mut Runtime, stream: RowStream) -> RowIter<'a> { RowIter { - it: executor::block_on_stream(Box::pin(stream)), - _p: PhantomData, + runtime, + it: Box::pin(stream), } } } @@ -29,6 +29,6 @@ impl FallibleIterator for RowIter<'_> { type Error = Error; fn next(&mut self) -> Result, Error> { - self.it.next().transpose() + self.runtime.block_on(self.it.next()).transpose() } } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 616a5872c..7c2620180 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,5 +1,5 @@ use crate::{CopyInWriter, CopyOutReader, Portal, RowIter, Statement, ToStatement}; -use futures::executor; +use tokio::runtime::Runtime; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; @@ -7,33 +7,43 @@ use tokio_postgres::{Error, Row, SimpleQueryMessage}; /// /// Transactions will implicitly roll back by default when dropped. Use the `commit` method to commit the changes made /// in the transaction. Transactions can be nested, with inner transactions implemented via savepoints. -pub struct Transaction<'a>(tokio_postgres::Transaction<'a>); +pub struct Transaction<'a> { + runtime: &'a mut Runtime, + transaction: tokio_postgres::Transaction<'a>, +} impl<'a> Transaction<'a> { - pub(crate) fn new(transaction: tokio_postgres::Transaction<'a>) -> Transaction<'a> { - Transaction(transaction) + pub(crate) fn new( + runtime: &'a mut Runtime, + transaction: tokio_postgres::Transaction<'a>, + ) -> Transaction<'a> { + Transaction { + runtime, + transaction, + } } /// Consumes the transaction, committing all changes made within it. pub fn commit(self) -> Result<(), Error> { - executor::block_on(self.0.commit()) + self.runtime.block_on(self.transaction.commit()) } /// Rolls the transaction back, discarding all changes made within it. /// /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. pub fn rollback(self) -> Result<(), Error> { - executor::block_on(self.0.rollback()) + self.runtime.block_on(self.transaction.rollback()) } /// Like `Client::prepare`. pub fn prepare(&mut self, query: &str) -> Result { - executor::block_on(self.0.prepare(query)) + self.runtime.block_on(self.transaction.prepare(query)) } /// Like `Client::prepare_typed`. pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - executor::block_on(self.0.prepare_typed(query, types)) + self.runtime + .block_on(self.transaction.prepare_typed(query, types)) } /// Like `Client::execute`. @@ -41,7 +51,8 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - executor::block_on(self.0.execute(query, params)) + self.runtime + .block_on(self.transaction.execute(query, params)) } /// Like `Client::query`. @@ -49,7 +60,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - executor::block_on(self.0.query(query, params)) + self.runtime.block_on(self.transaction.query(query, params)) } /// Like `Client::query_one`. 
@@ -57,7 +68,8 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - executor::block_on(self.0.query_one(query, params)) + self.runtime + .block_on(self.transaction.query_one(query, params)) } /// Like `Client::query_opt`. @@ -69,7 +81,8 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - executor::block_on(self.0.query_opt(query, params)) + self.runtime + .block_on(self.transaction.query_opt(query, params)) } /// Like `Client::query_raw`. @@ -79,8 +92,10 @@ impl<'a> Transaction<'a> { I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let stream = executor::block_on(self.0.query_raw(query, params))?; - Ok(RowIter::new(stream)) + let stream = self + .runtime + .block_on(self.transaction.query_raw(query, params))?; + Ok(RowIter::new(self.runtime, stream)) } /// Binds parameters to a statement, creating a "portal". @@ -97,7 +112,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - executor::block_on(self.0.bind(query, params)) + self.runtime.block_on(self.transaction.bind(query, params)) } /// Continues execution of a portal, returning the next set of rows. @@ -105,7 +120,8 @@ impl<'a> Transaction<'a> { /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// `query_portal`. If the requested number is negative or 0, all remaining rows will be returned. pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { - executor::block_on(self.0.query_portal(portal, max_rows)) + self.runtime + .block_on(self.transaction.query_portal(portal, max_rows)) } /// The maximally flexible version of `query_portal`. @@ -114,8 +130,10 @@ impl<'a> Transaction<'a> { portal: &Portal, max_rows: i32, ) -> Result, Error> { - let stream = executor::block_on(self.0.query_portal_raw(portal, max_rows))?; - Ok(RowIter::new(stream)) + let stream = self + .runtime + .block_on(self.transaction.query_portal_raw(portal, max_rows))?; + Ok(RowIter::new(self.runtime, stream)) } /// Like `Client::copy_in`. @@ -127,8 +145,10 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let sink = executor::block_on(self.0.copy_in(query, params))?; - Ok(CopyInWriter::new(sink)) + let sink = self + .runtime + .block_on(self.transaction.copy_in(query, params))?; + Ok(CopyInWriter::new(self.runtime, sink)) } /// Like `Client::copy_out`. @@ -140,23 +160,28 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let stream = executor::block_on(self.0.copy_out(query, params))?; - CopyOutReader::new(stream) + let stream = self + .runtime + .block_on(self.transaction.copy_out(query, params))?; + CopyOutReader::new(self.runtime, stream) } /// Like `Client::simple_query`. pub fn simple_query(&mut self, query: &str) -> Result, Error> { - executor::block_on(self.0.simple_query(query)) + self.runtime.block_on(self.transaction.simple_query(query)) } /// Like `Client::batch_execute`. pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { - executor::block_on(self.0.batch_execute(query)) + self.runtime.block_on(self.transaction.batch_execute(query)) } /// Like `Client::transaction`. 
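For reference, a short sketch of driving the blocking transaction API after this change (the statement and table name are illustrative); each call below is forwarded to the async transaction through the mutably borrowed runtime.

```rust
use postgres::error::Error;
use postgres::Client;

// Illustrative only: run a statement inside an explicit transaction and commit it.
fn bump_counters(client: &mut Client) -> Result<(), Error> {
    let mut transaction = client.transaction()?;
    transaction.execute("UPDATE foo SET bar = bar + 1", &[])?;
    transaction.commit()?;
    Ok(())
}
```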
pub fn transaction(&mut self) -> Result, Error> { - let transaction = executor::block_on(self.0.transaction())?; - Ok(Transaction(transaction)) + let transaction = self.runtime.block_on(self.transaction.transaction())?; + Ok(Transaction { + runtime: self.runtime, + transaction, + }) } } From 9950ff42132146e72a6f2821d4a56cf92f5ce90f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 3 Dec 2019 18:29:06 -0800 Subject: [PATCH 327/819] rustfmt --- postgres/src/row_iter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/src/row_iter.rs b/postgres/src/row_iter.rs index 150b5514e..4bd8e4d8a 100644 --- a/postgres/src/row_iter.rs +++ b/postgres/src/row_iter.rs @@ -1,8 +1,8 @@ use fallible_iterator::FallibleIterator; +use futures::StreamExt; use std::pin::Pin; use tokio::runtime::Runtime; use tokio_postgres::{Error, Row, RowStream}; -use futures::StreamExt; /// The iterator returned by `query_raw`. pub struct RowIter<'a> { From ed8fb9e8e7c195c42c82f0c693bee299c93cd877 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 4 Dec 2019 16:51:55 -0800 Subject: [PATCH 328/819] Update docker image to Postgres 12.1 --- docker-compose.yml | 2 +- docker/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 719bf5934..d44fbe866 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,6 @@ version: '2' services: postgres: - image: "sfackler/rust-postgres-test:5" + image: "sfackler/rust-postgres-test:6" ports: - 5433:5433 diff --git a/docker/Dockerfile b/docker/Dockerfile index bd685d445..1dd7f3db6 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,3 +1,3 @@ -FROM postgres:11 +FROM postgres:12 COPY sql_setup.sh /docker-entrypoint-initdb.d/ From ac4c63f4af57fe11f5e504a1259eb90c3f159f2c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 4 Dec 2019 16:52:20 -0800 Subject: [PATCH 329/819] Upgrade test postgres to 12.1 --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index cee94d9ff..19d845ba4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -25,7 +25,7 @@ jobs: - image: rust:1.39.0 environment: RUSTFLAGS: -D warnings - - image: sfackler/rust-postgres-test:5 + - image: sfackler/rust-postgres-test:6 steps: - checkout - run: rustup component add rustfmt clippy From 5c33bf8b309fdb8b96fd82057086bec128624a92 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 4 Dec 2019 18:51:59 -0800 Subject: [PATCH 330/819] Don't take parameters in copy_in and copy_out Postgres doesn't support them, so we may as well not provide the option! Closes #523 --- postgres/src/client.rs | 24 +++++++++--------------- postgres/src/test.rs | 8 +++----- postgres/src/transaction.rs | 20 ++++---------------- tokio-postgres-binary-copy/src/test.rs | 12 ++++++------ tokio-postgres/src/client.rs | 18 ++++++++---------- tokio-postgres/src/copy_in.rs | 10 +++------- tokio-postgres/src/copy_out.rs | 14 ++++---------- tokio-postgres/src/transaction.rs | 6 ++---- tokio-postgres/tests/test/main.rs | 8 ++++---- 9 files changed, 43 insertions(+), 77 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 6db4cfa42..e3c9efaa2 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -302,6 +302,7 @@ impl Client { /// /// The `query` argument can either be a `Statement`, or a raw query string. 
The data in the provided reader is /// passed along to the server verbatim; it is the caller's responsibility to ensure it uses the proper format. + /// PostgreSQL does not support parameters in `COPY` statements, so this method does not take any. /// /// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. /// @@ -314,27 +315,24 @@ impl Client { /// # fn main() -> Result<(), Box> { /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; /// - /// let mut writer = client.copy_in("COPY people FROM stdin", &[])?; + /// let mut writer = client.copy_in("COPY people FROM stdin")?; /// writer.write_all(b"1\tjohn\n2\tjane\n")?; /// writer.finish()?; /// # Ok(()) /// # } /// ``` - pub fn copy_in( - &mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> + pub fn copy_in(&mut self, query: &T) -> Result, Error> where T: ?Sized + ToStatement, { - let sink = self.runtime.block_on(self.client.copy_in(query, params))?; + let sink = self.runtime.block_on(self.client.copy_in(query))?; Ok(CopyInWriter::new(&mut self.runtime, sink)) } /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data. /// - /// The `query` argument can either be a `Statement`, or a raw query string. + /// The `query` argument can either be a `Statement`, or a raw query string. PostgreSQL does not support parameters + /// in `COPY` statements, so this method does not take any. /// /// # Examples /// @@ -345,21 +343,17 @@ impl Client { /// # fn main() -> Result<(), Box> { /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; /// - /// let mut reader = client.copy_out("COPY people TO stdout", &[])?; + /// let mut reader = client.copy_out("COPY people TO stdout")?; /// let mut buf = vec![]; /// reader.read_to_end(&mut buf)?; /// # Ok(()) /// # } /// ``` - pub fn copy_out( - &mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> + pub fn copy_out(&mut self, query: &T) -> Result, Error> where T: ?Sized + ToStatement, { - let stream = self.runtime.block_on(self.client.copy_out(query, params))?; + let stream = self.runtime.block_on(self.client.copy_out(query))?; CopyOutReader::new(&mut self.runtime, stream) } diff --git a/postgres/src/test.rs b/postgres/src/test.rs index d376d186f..845d5d694 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -154,7 +154,7 @@ fn copy_in() { .simple_query("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") .unwrap(); - let mut writer = client.copy_in("COPY foo FROM stdin", &[]).unwrap(); + let mut writer = client.copy_in("COPY foo FROM stdin").unwrap(); writer.write_all(b"1\tsteven\n2\ttimothy").unwrap(); writer.finish().unwrap(); @@ -177,7 +177,7 @@ fn copy_in_abort() { .simple_query("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") .unwrap(); - let mut writer = client.copy_in("COPY foo FROM stdin", &[]).unwrap(); + let mut writer = client.copy_in("COPY foo FROM stdin").unwrap(); writer.write_all(b"1\tsteven\n2\ttimothy").unwrap(); drop(writer); @@ -199,9 +199,7 @@ fn copy_out() { ) .unwrap(); - let mut reader = client - .copy_out("COPY foo (id, name) TO STDOUT", &[]) - .unwrap(); + let mut reader = client.copy_out("COPY foo (id, name) TO STDOUT").unwrap(); let mut s = String::new(); reader.read_to_string(&mut s).unwrap(); drop(reader); diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 7c2620180..ece8de99b 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -137,32 +137,20 @@ 
impl<'a> Transaction<'a> { } /// Like `Client::copy_in`. - pub fn copy_in( - &mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> + pub fn copy_in(&mut self, query: &T) -> Result, Error> where T: ?Sized + ToStatement, { - let sink = self - .runtime - .block_on(self.transaction.copy_in(query, params))?; + let sink = self.runtime.block_on(self.transaction.copy_in(query))?; Ok(CopyInWriter::new(self.runtime, sink)) } /// Like `Client::copy_out`. - pub fn copy_out( - &mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> + pub fn copy_out(&mut self, query: &T) -> Result, Error> where T: ?Sized + ToStatement, { - let stream = self - .runtime - .block_on(self.transaction.copy_out(query, params))?; + let stream = self.runtime.block_on(self.transaction.copy_out(query))?; CopyOutReader::new(self.runtime, stream) } diff --git a/tokio-postgres-binary-copy/src/test.rs b/tokio-postgres-binary-copy/src/test.rs index d8d74ccba..3b0450868 100644 --- a/tokio-postgres-binary-copy/src/test.rs +++ b/tokio-postgres-binary-copy/src/test.rs @@ -24,7 +24,7 @@ async fn write_basic() { .unwrap(); let sink = client - .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]) + .copy_in("COPY foo (id, bar) FROM STDIN BINARY") .await .unwrap(); let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]); @@ -58,7 +58,7 @@ async fn write_many_rows() { .unwrap(); let sink = client - .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]) + .copy_in("COPY foo (id, bar) FROM STDIN BINARY") .await .unwrap(); let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]); @@ -94,7 +94,7 @@ async fn write_big_rows() { .unwrap(); let sink = client - .copy_in("COPY foo (id, bar) FROM STDIN BINARY", &[]) + .copy_in("COPY foo (id, bar) FROM STDIN BINARY") .await .unwrap(); let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::BYTEA]); @@ -135,7 +135,7 @@ async fn read_basic() { .unwrap(); let stream = client - .copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]) + .copy_out("COPY foo (id, bar) TO STDIN BINARY") .await .unwrap(); let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream) @@ -164,7 +164,7 @@ async fn read_many_rows() { .unwrap(); let stream = client - .copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]) + .copy_out("COPY foo (id, bar) TO STDIN BINARY") .await .unwrap(); let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream) @@ -198,7 +198,7 @@ async fn read_big_rows() { } let stream = client - .copy_out("COPY foo (id, bar) TO STDIN BINARY", &[]) + .copy_out("COPY foo (id, bar) TO STDIN BINARY") .await .unwrap(); let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::BYTEA], stream) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index fa0a4f7aa..8c9e133d5 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -378,42 +378,40 @@ impl Client { /// Executes a `COPY FROM STDIN` statement, returning a sink used to write the copy data. /// - /// The copy *must* be explicitly completed via the `Sink::close` or `finish` methods. If it is - /// not, the copy will be aborted. + /// PostgreSQL does not support parameters in `COPY` statements, so this method does not take any. The copy *must* + /// be explicitly completed via the `Sink::close` or `finish` methods. If it is not, the copy will be aborted. /// /// # Panics /// - /// Panics if the number of parameters provided does not match the number expected. + /// Panics if the statement contains parameters. 
pub async fn copy_in( &self, statement: &T, - params: &[&(dyn ToSql + Sync)], ) -> Result, Error> where T: ?Sized + ToStatement, U: Buf + 'static + Send, { let statement = statement.__convert().into_statement(self).await?; - let params = slice_iter(params); - copy_in::copy_in(self.inner(), statement, params).await + copy_in::copy_in(self.inner(), statement).await } /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. /// + /// PostgreSQL does not support parameters in `COPY` statements, so this method does not take any. + /// /// # Panics /// - /// Panics if the number of parameters provided does not match the number expected. + /// Panics if the statement contains parameters. pub async fn copy_out( &self, statement: &T, - params: &[&(dyn ToSql + Sync)], ) -> Result where T: ?Sized + ToStatement, { let statement = statement.__convert().into_statement(self).await?; - let params = slice_iter(params); - copy_out::copy_out(self.inner(), statement, params).await + copy_out::copy_out(self.inner(), statement).await } /// Executes a sequence of SQL statements using the simple query protocol, returning the resulting rows. diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index cd677758a..cfaa32bd4 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -1,8 +1,7 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::types::ToSql; -use crate::{query, Error, Statement}; +use crate::{query, Error, Statement, slice_iter}; use bytes::buf::BufExt; use bytes::{Buf, BufMut, BytesMut}; use futures::channel::mpsc; @@ -196,17 +195,14 @@ where } } -pub async fn copy_in<'a, I, T>( +pub async fn copy_in( client: &InnerClient, statement: Statement, - params: I, ) -> Result, Error> where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, T: Buf + 'static + Send, { - let buf = query::encode(client, &statement, params)?; + let buf = query::encode(client, &statement, slice_iter(&[]))?; let (mut sender, receiver) = mpsc::channel(1); let receiver = CopyInReceiver::new(receiver); diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index e4bb29e7c..eb9cfb4d0 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -1,8 +1,7 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::types::ToSql; -use crate::{query, Error, Statement}; +use crate::{query, Error, Statement, slice_iter}; use bytes::Bytes; use futures::{ready, Stream}; use pin_project_lite::pin_project; @@ -11,16 +10,11 @@ use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; -pub async fn copy_out<'a, I>( +pub async fn copy_out( client: &InnerClient, statement: Statement, - params: I, -) -> Result -where - I: IntoIterator, - I::IntoIter: ExactSizeIterator, -{ - let buf = query::encode(client, &statement, params)?; +) -> Result { + let buf = query::encode(client, &statement, slice_iter(&[]))?; let responses = start(client, buf).await?; Ok(CopyOutStream { responses, diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 1a3aae829..4702b9173 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -224,25 +224,23 @@ impl<'a> Transaction<'a> { pub async fn copy_in( &self, statement: &T, - params: &[&(dyn ToSql + Sync)], ) -> Result, Error> where T: ?Sized + ToStatement, U: Buf + 
'static + Send, { - self.client.copy_in(statement, params).await + self.client.copy_in(statement).await } /// Like `Client::copy_out`. pub async fn copy_out( &self, statement: &T, - params: &[&(dyn ToSql + Sync)], ) -> Result where T: ?Sized + ToStatement, { - self.client.copy_out(statement, params).await + self.client.copy_out(statement).await } /// Like `Client::simple_query`. diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index e510e9644..79acf2884 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -418,7 +418,7 @@ async fn copy_in() { .into_iter() .map(Ok::<_, Error>), ); - let sink = client.copy_in("COPY foo FROM STDIN", &[]).await.unwrap(); + let sink = client.copy_in("COPY foo FROM STDIN").await.unwrap(); pin_mut!(sink); sink.send_all(&mut stream).await.unwrap(); let rows = sink.finish().await.unwrap(); @@ -465,7 +465,7 @@ async fn copy_in_large() { .map(Ok::<_, Error>), ); - let sink = client.copy_in("COPY foo FROM STDIN", &[]).await.unwrap(); + let sink = client.copy_in("COPY foo FROM STDIN").await.unwrap(); pin_mut!(sink); sink.send_all(&mut stream).await.unwrap(); let rows = sink.finish().await.unwrap(); @@ -487,7 +487,7 @@ async fn copy_in_error() { .unwrap(); { - let sink = client.copy_in("COPY foo FROM STDIN", &[]).await.unwrap(); + let sink = client.copy_in("COPY foo FROM STDIN").await.unwrap(); pin_mut!(sink); sink.send(Bytes::from_static(b"1\tsteven")).await.unwrap(); } @@ -517,7 +517,7 @@ async fn copy_out() { let stmt = client.prepare("COPY foo TO STDOUT").await.unwrap(); let data = client - .copy_out(&stmt, &[]) + .copy_out(&stmt) .await .unwrap() .try_fold(BytesMut::new(), |mut buf, chunk| { From 0c84ed9f8201f4e5b4803199a24afa2c9f3723b2 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 4 Dec 2019 18:59:22 -0800 Subject: [PATCH 331/819] rustfmt --- tokio-postgres/src/client.rs | 10 ++-------- tokio-postgres/src/copy_in.rs | 7 ++----- tokio-postgres/src/copy_out.rs | 7 ++----- tokio-postgres/src/transaction.rs | 10 ++-------- 4 files changed, 8 insertions(+), 26 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 8c9e133d5..68ccaf371 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -384,10 +384,7 @@ impl Client { /// # Panics /// /// Panics if the statement contains parameters. - pub async fn copy_in( - &self, - statement: &T, - ) -> Result, Error> + pub async fn copy_in(&self, statement: &T) -> Result, Error> where T: ?Sized + ToStatement, U: Buf + 'static + Send, @@ -403,10 +400,7 @@ impl Client { /// # Panics /// /// Panics if the statement contains parameters. 
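A brief sketch of the parameter-free copy API from the caller's side, modeled on the tests above; the `foo` table and the tab-separated payload are illustrative.

```rust
use bytes::Bytes;
use futures::{pin_mut, SinkExt};
use tokio_postgres::{Client, Error};

// Illustrative only: copy_in no longer takes a parameter slice, just the statement.
async fn load(client: &Client) -> Result<u64, Error> {
    let sink = client.copy_in("COPY foo FROM STDIN").await?;
    pin_mut!(sink);
    // Feed raw text-format rows, then finish to get the row count.
    sink.send(Bytes::from_static(b"1\tsteven\n2\ttimothy\n")).await?;
    sink.finish().await
}
```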
- pub async fn copy_out( - &self, - statement: &T, - ) -> Result + pub async fn copy_out(&self, statement: &T) -> Result where T: ?Sized + ToStatement, { diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index cfaa32bd4..f9b8f5999 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -1,7 +1,7 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::{query, Error, Statement, slice_iter}; +use crate::{query, slice_iter, Error, Statement}; use bytes::buf::BufExt; use bytes::{Buf, BufMut, BytesMut}; use futures::channel::mpsc; @@ -195,10 +195,7 @@ where } } -pub async fn copy_in( - client: &InnerClient, - statement: Statement, -) -> Result, Error> +pub async fn copy_in(client: &InnerClient, statement: Statement) -> Result, Error> where T: Buf + 'static + Send, { diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index eb9cfb4d0..62c1a1a4f 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -1,7 +1,7 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::{query, Error, Statement, slice_iter}; +use crate::{query, slice_iter, Error, Statement}; use bytes::Bytes; use futures::{ready, Stream}; use pin_project_lite::pin_project; @@ -10,10 +10,7 @@ use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; -pub async fn copy_out( - client: &InnerClient, - statement: Statement, -) -> Result { +pub async fn copy_out(client: &InnerClient, statement: Statement) -> Result { let buf = query::encode(client, &statement, slice_iter(&[]))?; let responses = start(client, buf).await?; Ok(CopyOutStream { diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 4702b9173..aaf7efd85 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -221,10 +221,7 @@ impl<'a> Transaction<'a> { } /// Like `Client::copy_in`. - pub async fn copy_in( - &self, - statement: &T, - ) -> Result, Error> + pub async fn copy_in(&self, statement: &T) -> Result, Error> where T: ?Sized + ToStatement, U: Buf + 'static + Send, @@ -233,10 +230,7 @@ impl<'a> Transaction<'a> { } /// Like `Client::copy_out`. 
- pub async fn copy_out( - &self, - statement: &T, - ) -> Result + pub async fn copy_out(&self, statement: &T) -> Result where T: ?Sized + ToStatement, { From bf8b335d2b3b4f9c5058355b24b393cce6f6b48a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 8 Dec 2019 18:30:47 -0800 Subject: [PATCH 332/819] Move binary copy stuff directly into main crate --- Cargo.toml | 1 - tokio-postgres-binary-copy/Cargo.toml | 16 --- tokio-postgres/Cargo.toml | 1 + .../src/binary_copy.rs | 114 ++++++++++++------ tokio-postgres/src/lib.rs | 1 + .../tests/test/binary_copy.rs | 33 ++--- tokio-postgres/tests/test/main.rs | 1 + 7 files changed, 91 insertions(+), 76 deletions(-) delete mode 100644 tokio-postgres-binary-copy/Cargo.toml rename tokio-postgres-binary-copy/src/lib.rs => tokio-postgres/src/binary_copy.rs (57%) rename tokio-postgres-binary-copy/src/test.rs => tokio-postgres/tests/test/binary_copy.rs (86%) diff --git a/Cargo.toml b/Cargo.toml index 4d8dbe78d..4752836a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,6 @@ members = [ "postgres-protocol", "postgres-types", "tokio-postgres", - "tokio-postgres-binary-copy", ] [profile.release] diff --git a/tokio-postgres-binary-copy/Cargo.toml b/tokio-postgres-binary-copy/Cargo.toml deleted file mode 100644 index a8d44bf55..000000000 --- a/tokio-postgres-binary-copy/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "tokio-postgres-binary-copy" -version = "0.1.0" -authors = ["Steven Fackler "] -edition = "2018" - -[dependencies] -byteorder = "1.0" -bytes = "0.5" -futures = "0.3" -pin-project-lite = "0.1" -tokio-postgres = { version = "=0.5.0-alpha.2", default-features = false, path = "../tokio-postgres" } - -[dev-dependencies] -tokio = { version = "0.2", features = ["full"] } -tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres" } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 3568300c3..d48b9e652 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -37,6 +37,7 @@ with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] [dependencies] bytes = "0.5" +byteorder = "1.0" fallible-iterator = "0.2" futures = "0.3" log = "0.4" diff --git a/tokio-postgres-binary-copy/src/lib.rs b/tokio-postgres/src/binary_copy.rs similarity index 57% rename from tokio-postgres-binary-copy/src/lib.rs rename to tokio-postgres/src/binary_copy.rs index 6b627c71d..231f202d8 100644 --- a/tokio-postgres-binary-copy/src/lib.rs +++ b/tokio-postgres/src/binary_copy.rs @@ -1,24 +1,26 @@ +//! Utilities for working with the PostgreSQL binary copy format. + +use crate::types::{FromSql, IsNull, ToSql, Type, WrongType}; +use crate::{slice_iter, CopyInSink, CopyOutStream, Error}; use byteorder::{BigEndian, ByteOrder}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use futures::{ready, SinkExt, Stream}; use pin_project_lite::pin_project; use std::convert::TryFrom; -use std::error::Error; +use std::io; use std::io::Cursor; use std::ops::Range; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use tokio_postgres::types::{FromSql, IsNull, ToSql, Type, WrongType}; -use tokio_postgres::{CopyInSink, CopyOutStream}; - -#[cfg(test)] -mod test; const MAGIC: &[u8] = b"PGCOPY\n\xff\r\n\0"; const HEADER_LEN: usize = MAGIC.len() + 4 + 4; pin_project! { + /// A type which serializes rows into the PostgreSQL binary copy format. + /// + /// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. 
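A usage sketch of the relocated writer, following the tests further down in this patch; the table and column types are assumptions for illustration.

```rust
use futures::pin_mut;
use tokio_postgres::binary_copy::BinaryCopyInWriter;
use tokio_postgres::types::Type;
use tokio_postgres::{Client, Error};

// Illustrative only: wrap the raw CopyInSink in the binary writer, write typed
// rows, and finish to complete the copy.
async fn write_rows(client: &Client) -> Result<u64, Error> {
    let sink = client.copy_in("COPY foo (id, bar) FROM STDIN BINARY").await?;
    let writer = BinaryCopyInWriter::new(sink, &[Type::INT4, Type::TEXT]);
    pin_mut!(writer);
    writer.as_mut().write(&[&1i32, &"alice"]).await?;
    writer.as_mut().write(&[&2i32, &"bob"]).await?;
    writer.finish().await
}
```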
pub struct BinaryCopyInWriter { #[pin] sink: CopyInSink, @@ -28,10 +30,10 @@ pin_project! { } impl BinaryCopyInWriter { + /// Creates a new writer which will write rows of the provided types to the provided sink. pub fn new(sink: CopyInSink, types: &[Type]) -> BinaryCopyInWriter { let mut buf = BytesMut::new(); - buf.reserve(HEADER_LEN); - buf.put_slice(MAGIC); // magic + buf.put_slice(MAGIC); buf.put_i32(0); // flags buf.put_i32(0); // header extension @@ -42,19 +44,23 @@ impl BinaryCopyInWriter { } } - pub async fn write( - self: Pin<&mut Self>, - values: &[&(dyn ToSql + Send)], - ) -> Result<(), Box> { - self.write_raw(values.iter().cloned()).await + /// Writes a single row. + /// + /// # Panics + /// + /// Panics if the number of values provided does not match the number expected. + pub async fn write(self: Pin<&mut Self>, values: &[&(dyn ToSql + Sync)]) -> Result<(), Error> { + self.write_raw(slice_iter(values)).await } - pub async fn write_raw<'a, I>( - self: Pin<&mut Self>, - values: I, - ) -> Result<(), Box> + /// A maximally-flexible version of `write`. + /// + /// # Panics + /// + /// Panics if the number of values provided does not match the number expected. + pub async fn write_raw<'a, I>(self: Pin<&mut Self>, values: I) -> Result<(), Error> where - I: IntoIterator, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let mut this = self.project(); @@ -69,12 +75,16 @@ impl BinaryCopyInWriter { this.buf.put_i16(this.types.len() as i16); - for (value, type_) in values.zip(this.types) { + for (i, (value, type_)) in values.zip(this.types).enumerate() { let idx = this.buf.len(); this.buf.put_i32(0); - let len = match value.to_sql_checked(type_, this.buf)? { + let len = match value + .to_sql_checked(type_, this.buf) + .map_err(|e| Error::to_sql(e, i))? + { IsNull::Yes => -1, - IsNull::No => i32::try_from(this.buf.len() - idx - 4)?, + IsNull::No => i32::try_from(this.buf.len() - idx - 4) + .map_err(|e| Error::encode(io::Error::new(io::ErrorKind::InvalidInput, e)))?, }; BigEndian::write_i32(&mut this.buf[idx..], len); } @@ -86,7 +96,10 @@ impl BinaryCopyInWriter { Ok(()) } - pub async fn finish(self: Pin<&mut Self>) -> Result { + /// Completes the copy, returning the number of rows added. + /// + /// This method *must* be used to complete the copy process. If it is not, the copy will be aborted. + pub async fn finish(self: Pin<&mut Self>) -> Result { let mut this = self.project(); this.buf.put_i16(-1); @@ -100,6 +113,7 @@ struct Header { } pin_project! { + /// A stream of rows deserialized from the PostgreSQL binary copy format. pub struct BinaryCopyOutStream { #[pin] stream: CopyOutStream, @@ -109,7 +123,8 @@ pin_project! { } impl BinaryCopyOutStream { - pub fn new(types: &[Type], stream: CopyOutStream) -> BinaryCopyOutStream { + /// Creates a stream from a raw copy out stream and the types of the columns being returned. 
+ pub fn new(stream: CopyOutStream, types: &[Type]) -> BinaryCopyOutStream { BinaryCopyOutStream { stream, types: Arc::new(types.to_vec()), @@ -119,15 +134,15 @@ impl BinaryCopyOutStream { } impl Stream for BinaryCopyOutStream { - type Item = Result>; + type Item = Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); let chunk = match ready!(this.stream.poll_next(cx)) { Some(Ok(chunk)) => chunk, - Some(Err(e)) => return Poll::Ready(Some(Err(e.into()))), - None => return Poll::Ready(Some(Err("unexpected EOF".into()))), + Some(Err(e)) => return Poll::Ready(Some(Err(e))), + None => return Poll::Ready(Some(Err(Error::closed()))), }; let mut chunk = Cursor::new(chunk); @@ -136,7 +151,10 @@ impl Stream for BinaryCopyOutStream { None => { check_remaining(&chunk, HEADER_LEN)?; if &chunk.bytes()[..MAGIC.len()] != MAGIC { - return Poll::Ready(Some(Err("invalid magic value".into()))); + return Poll::Ready(Some(Err(Error::parse(io::Error::new( + io::ErrorKind::InvalidData, + "invalid magic value", + ))))); } chunk.advance(MAGIC.len()); @@ -162,7 +180,10 @@ impl Stream for BinaryCopyOutStream { len += 1; } if len as usize != this.types.len() { - return Poll::Ready(Some(Err("unexpected tuple size".into()))); + return Poll::Ready(Some(Err(Error::parse(io::Error::new( + io::ErrorKind::InvalidInput, + format!("expected {} values but got {}", this.types.len(), len), + ))))); } let mut ranges = vec![]; @@ -188,14 +209,18 @@ impl Stream for BinaryCopyOutStream { } } -fn check_remaining(buf: &impl Buf, len: usize) -> Result<(), Box> { +fn check_remaining(buf: &Cursor, len: usize) -> Result<(), Error> { if buf.remaining() < len { - Err("unexpected EOF".into()) + Err(Error::parse(io::Error::new( + io::ErrorKind::UnexpectedEof, + "unexpected EOF", + ))) } else { Ok(()) } } +/// A row of data parsed from a binary copy out stream. pub struct BinaryCopyOutRow { buf: Bytes, ranges: Vec>>, @@ -203,21 +228,36 @@ pub struct BinaryCopyOutRow { } impl BinaryCopyOutRow { - pub fn try_get<'a, T>(&'a self, idx: usize) -> Result> + /// Like `get`, but returns a `Result` rather than panicking. + pub fn try_get<'a, T>(&'a self, idx: usize) -> Result where T: FromSql<'a>, { - let type_ = &self.types[idx]; + let type_ = match self.types.get(idx) { + Some(type_) => type_, + None => return Err(Error::column(idx.to_string())), + }; + if !T::accepts(type_) { - return Err(WrongType::new::(type_.clone()).into()); + return Err(Error::from_sql( + Box::new(WrongType::new::(type_.clone())), + idx, + )); } - match &self.ranges[idx] { - Some(range) => T::from_sql(type_, &self.buf[range.clone()]).map_err(Into::into), - None => T::from_sql_null(type_).map_err(Into::into), - } + let r = match &self.ranges[idx] { + Some(range) => T::from_sql(type_, &self.buf[range.clone()]), + None => T::from_sql_null(type_), + }; + + r.map_err(|e| Error::from_sql(e, idx)) } + /// Deserializes a value from the row. + /// + /// # Panics + /// + /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. 
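And the read side, again a sketch with an assumed `foo (id INT, bar TEXT)` table:

```rust
use futures::TryStreamExt;
use tokio_postgres::binary_copy::BinaryCopyOutStream;
use tokio_postgres::types::Type;
use tokio_postgres::{Client, Error};

// Illustrative only: collect the binary copy stream into rows and decode each column.
async fn read_rows(client: &Client) -> Result<(), Error> {
    let stream = client.copy_out("COPY foo (id, bar) TO STDOUT BINARY").await?;
    let rows = BinaryCopyOutStream::new(stream, &[Type::INT4, Type::TEXT])
        .try_collect::<Vec<_>>()
        .await?;
    for row in &rows {
        let id: i32 = row.get(0);
        let bar: &str = row.get(1);
        println!("{}: {}", id, bar);
    }
    Ok(())
}
```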
pub fn get<'a, T>(&'a self, idx: usize) -> T where T: FromSql<'a>, diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index d1290ac49..70fee32f9 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -123,6 +123,7 @@ pub use crate::to_statement::ToStatement; pub use crate::transaction::Transaction; use crate::types::ToSql; +pub mod binary_copy; mod bind; #[cfg(feature = "runtime")] mod cancel_query; diff --git a/tokio-postgres-binary-copy/src/test.rs b/tokio-postgres/tests/test/binary_copy.rs similarity index 86% rename from tokio-postgres-binary-copy/src/test.rs rename to tokio-postgres/tests/test/binary_copy.rs index 3b0450868..ab69742dc 100644 --- a/tokio-postgres-binary-copy/src/test.rs +++ b/tokio-postgres/tests/test/binary_copy.rs @@ -1,22 +1,11 @@ -use crate::{BinaryCopyInWriter, BinaryCopyOutStream}; +use crate::connect; use futures::{pin_mut, TryStreamExt}; +use tokio_postgres::binary_copy::{BinaryCopyInWriter, BinaryCopyOutStream}; use tokio_postgres::types::Type; -use tokio_postgres::{Client, NoTls}; - -async fn connect() -> Client { - let (client, connection) = - tokio_postgres::connect("host=localhost port=5433 user=postgres", NoTls) - .await - .unwrap(); - tokio::spawn(async { - connection.await.unwrap(); - }); - client -} #[tokio::test] async fn write_basic() { - let client = connect().await; + let client = connect("user=postgres").await; client .batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar TEXT)") @@ -50,7 +39,7 @@ async fn write_basic() { #[tokio::test] async fn write_many_rows() { - let client = connect().await; + let client = connect("user=postgres").await; client .batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar TEXT)") @@ -86,7 +75,7 @@ async fn write_many_rows() { #[tokio::test] async fn write_big_rows() { - let client = connect().await; + let client = connect("user=postgres").await; client .batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar BYTEA)") @@ -122,7 +111,7 @@ async fn write_big_rows() { #[tokio::test] async fn read_basic() { - let client = connect().await; + let client = connect("user=postgres").await; client .batch_execute( @@ -138,7 +127,7 @@ async fn read_basic() { .copy_out("COPY foo (id, bar) TO STDIN BINARY") .await .unwrap(); - let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream) + let rows = BinaryCopyOutStream::new(stream, &[Type::INT4, Type::TEXT]) .try_collect::>() .await .unwrap(); @@ -152,7 +141,7 @@ async fn read_basic() { #[tokio::test] async fn read_many_rows() { - let client = connect().await; + let client = connect("user=postgres").await; client .batch_execute( @@ -167,7 +156,7 @@ async fn read_many_rows() { .copy_out("COPY foo (id, bar) TO STDIN BINARY") .await .unwrap(); - let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::TEXT], stream) + let rows = BinaryCopyOutStream::new(stream, &[Type::INT4, Type::TEXT]) .try_collect::>() .await .unwrap(); @@ -181,7 +170,7 @@ async fn read_many_rows() { #[tokio::test] async fn read_big_rows() { - let client = connect().await; + let client = connect("user=postgres").await; client .batch_execute("CREATE TEMPORARY TABLE foo (id INT, bar BYTEA)") @@ -201,7 +190,7 @@ async fn read_big_rows() { .copy_out("COPY foo (id, bar) TO STDIN BINARY") .await .unwrap(); - let rows = BinaryCopyOutStream::new(&[Type::INT4, Type::BYTEA], stream) + let rows = BinaryCopyOutStream::new(stream, &[Type::INT4, Type::BYTEA]) .try_collect::>() .await .unwrap(); diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 
79acf2884..6f87dac51 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -14,6 +14,7 @@ use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::types::{Kind, Type}; use tokio_postgres::{AsyncMessage, Client, Config, Connection, Error, SimpleQueryMessage}; +mod binary_copy; mod parse; #[cfg(feature = "runtime")] mod runtime; From 4a5a2778786341e855c238d09eeb59b1c41fae4e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 10 Dec 2019 16:34:04 -0800 Subject: [PATCH 333/819] query_opt test --- tokio-postgres/tests/test/main.rs | 43 +++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 6f87dac51..231749526 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -667,11 +667,11 @@ async fn query_one() { client .batch_execute( " - CREATE TEMPORARY TABLE foo ( - name TEXT - ); - INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('carol'); - ", + CREATE TEMPORARY TABLE foo ( + name TEXT + ); + INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('carol'); + ", ) .await .unwrap(); @@ -691,3 +691,36 @@ async fn query_one() { .err() .unwrap(); } + +#[tokio::test] +async fn query_opt() { + let client = connect("user=postgres").await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo ( + name TEXT + ); + INSERT INTO foo (name) VALUES ('alice'), ('bob'), ('carol'); + ", + ) + .await + .unwrap(); + + assert!(client + .query_opt("SELECT * FROM foo WHERE name = 'dave'", &[]) + .await + .unwrap() + .is_none()); + client + .query_opt("SELECT * FROM foo WHERE name = 'alice'", &[]) + .await + .unwrap() + .unwrap(); + client + .query_one("SELECT * FROM foo", &[]) + .await + .err() + .unwrap(); +} From cc8d8fe7341333126b49bccc2e8819cbd2211582 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 15 Dec 2019 15:58:50 -0800 Subject: [PATCH 334/819] Unify no-op drop impls --- postgres/src/client.rs | 34 ++++++++++++++++++++++++++++++--- postgres/src/copy_in_writer.rs | 11 +++-------- postgres/src/copy_out_reader.rs | 11 +++-------- postgres/src/row_iter.rs | 6 +++--- postgres/src/transaction.rs | 14 +++++++++----- 5 files changed, 49 insertions(+), 27 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index e3c9efaa2..2621cbf70 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,9 +1,33 @@ use crate::{Config, CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction}; +use std::ops::{Deref, DerefMut}; use tokio::runtime::Runtime; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage, Socket}; +pub(crate) struct Rt<'a>(pub &'a mut Runtime); + +// no-op impl to extend the borrow until drop +impl Drop for Rt<'_> { + fn drop(&mut self) {} +} + +impl Deref for Rt<'_> { + type Target = Runtime; + + #[inline] + fn deref(&self) -> &Runtime { + self.0 + } +} + +impl DerefMut for Rt<'_> { + #[inline] + fn deref_mut(&mut self) -> &mut Runtime { + self.0 + } +} + /// A synchronous PostgreSQL client. pub struct Client { runtime: Runtime, @@ -38,6 +62,10 @@ impl Client { Config::new() } + fn rt(&mut self) -> Rt<'_> { + Rt(&mut self.runtime) + } + /// Executes a statement, returning the number of rows modified. 
/// /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list @@ -236,7 +264,7 @@ impl Client { let stream = self .runtime .block_on(self.client.query_raw(query, params))?; - Ok(RowIter::new(&mut self.runtime, stream)) + Ok(RowIter::new(self.rt(), stream)) } /// Creates a new prepared statement. @@ -326,7 +354,7 @@ impl Client { T: ?Sized + ToStatement, { let sink = self.runtime.block_on(self.client.copy_in(query))?; - Ok(CopyInWriter::new(&mut self.runtime, sink)) + Ok(CopyInWriter::new(self.rt(), sink)) } /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data. @@ -354,7 +382,7 @@ impl Client { T: ?Sized + ToStatement, { let stream = self.runtime.block_on(self.client.copy_out(query))?; - CopyOutReader::new(&mut self.runtime, stream) + CopyOutReader::new(self.rt(), stream) } /// Executes a sequence of SQL statements using the simple query protocol. diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs index 897d87567..9a9e4899e 100644 --- a/postgres/src/copy_in_writer.rs +++ b/postgres/src/copy_in_writer.rs @@ -1,27 +1,22 @@ +use crate::Rt; use bytes::{Bytes, BytesMut}; use futures::SinkExt; use std::io; use std::io::Write; use std::pin::Pin; -use tokio::runtime::Runtime; use tokio_postgres::{CopyInSink, Error}; /// The writer returned by the `copy_in` method. /// /// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. pub struct CopyInWriter<'a> { - runtime: &'a mut Runtime, + runtime: Rt<'a>, sink: Pin>>, buf: BytesMut, } -// no-op impl to extend borrow until drop -impl Drop for CopyInWriter<'_> { - fn drop(&mut self) {} -} - impl<'a> CopyInWriter<'a> { - pub(crate) fn new(runtime: &'a mut Runtime, sink: CopyInSink) -> CopyInWriter<'a> { + pub(crate) fn new(runtime: Rt<'a>, sink: CopyInSink) -> CopyInWriter<'a> { CopyInWriter { runtime, sink: Box::pin(sink), diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index 14f8d6302..c1bc2ff6e 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -1,25 +1,20 @@ +use crate::Rt; use bytes::{Buf, Bytes}; use futures::StreamExt; use std::io::{self, BufRead, Cursor, Read}; use std::pin::Pin; -use tokio::runtime::Runtime; use tokio_postgres::{CopyOutStream, Error}; /// The reader returned by the `copy_out` method. pub struct CopyOutReader<'a> { - runtime: &'a mut Runtime, + runtime: Rt<'a>, stream: Pin>, cur: Cursor, } -// no-op impl to extend borrow until drop -impl Drop for CopyOutReader<'_> { - fn drop(&mut self) {} -} - impl<'a> CopyOutReader<'a> { pub(crate) fn new( - runtime: &'a mut Runtime, + mut runtime: Rt<'a>, stream: CopyOutStream, ) -> Result, Error> { let mut stream = Box::pin(stream); diff --git a/postgres/src/row_iter.rs b/postgres/src/row_iter.rs index 4bd8e4d8a..4be5f3477 100644 --- a/postgres/src/row_iter.rs +++ b/postgres/src/row_iter.rs @@ -1,12 +1,12 @@ +use crate::Rt; use fallible_iterator::FallibleIterator; use futures::StreamExt; use std::pin::Pin; -use tokio::runtime::Runtime; use tokio_postgres::{Error, Row, RowStream}; /// The iterator returned by `query_raw`. 
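The empty `Drop` impl on `Rt` is what keeps the mutable borrow of the runtime alive for the wrapper's whole scope; without it, non-lexical lifetimes would end the borrow at the last use. A standalone illustration of that mechanism, with names invented for the sketch:

```rust
struct Guard<'a>(&'a mut String);

// With this impl the compiler treats the Guard as used when it is dropped, so
// the &mut borrow lasts until the end of scope rather than the last call site.
impl Drop for Guard<'_> {
    fn drop(&mut self) {}
}

fn main() {
    let mut s = String::new();
    let guard = Guard(&mut s);
    guard.0.push('x');
    // `s` stays mutably borrowed here until `guard` goes out of scope.
}
```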
pub struct RowIter<'a> { - runtime: &'a mut Runtime, + runtime: Rt<'a>, it: Pin>, } @@ -16,7 +16,7 @@ impl Drop for RowIter<'_> { } impl<'a> RowIter<'a> { - pub(crate) fn new(runtime: &'a mut Runtime, stream: RowStream) -> RowIter<'a> { + pub(crate) fn new(runtime: Rt<'a>, stream: RowStream) -> RowIter<'a> { RowIter { runtime, it: Box::pin(stream), diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index ece8de99b..2b397ca52 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,4 +1,4 @@ -use crate::{CopyInWriter, CopyOutReader, Portal, RowIter, Statement, ToStatement}; +use crate::{CopyInWriter, CopyOutReader, Portal, RowIter, Rt, Statement, ToStatement}; use tokio::runtime::Runtime; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; @@ -23,6 +23,10 @@ impl<'a> Transaction<'a> { } } + fn rt(&mut self) -> Rt<'_> { + Rt(self.runtime) + } + /// Consumes the transaction, committing all changes made within it. pub fn commit(self) -> Result<(), Error> { self.runtime.block_on(self.transaction.commit()) @@ -95,7 +99,7 @@ impl<'a> Transaction<'a> { let stream = self .runtime .block_on(self.transaction.query_raw(query, params))?; - Ok(RowIter::new(self.runtime, stream)) + Ok(RowIter::new(self.rt(), stream)) } /// Binds parameters to a statement, creating a "portal". @@ -133,7 +137,7 @@ impl<'a> Transaction<'a> { let stream = self .runtime .block_on(self.transaction.query_portal_raw(portal, max_rows))?; - Ok(RowIter::new(self.runtime, stream)) + Ok(RowIter::new(self.rt(), stream)) } /// Like `Client::copy_in`. @@ -142,7 +146,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { let sink = self.runtime.block_on(self.transaction.copy_in(query))?; - Ok(CopyInWriter::new(self.runtime, sink)) + Ok(CopyInWriter::new(self.rt(), sink)) } /// Like `Client::copy_out`. @@ -151,7 +155,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { let stream = self.runtime.block_on(self.transaction.copy_out(query))?; - CopyOutReader::new(self.runtime, stream) + CopyOutReader::new(self.rt(), stream) } /// Like `Client::simple_query`. From a254e6e9e9c9c6f0d07f95ae9de8635e6957e670 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 15 Dec 2019 17:01:53 -0800 Subject: [PATCH 335/819] Blocking binary copy support --- postgres/src/binary_copy.rs | 92 +++++++++++++++++++++++++++++++++ postgres/src/client.rs | 2 +- postgres/src/copy_in_writer.rs | 12 ++--- postgres/src/copy_out_reader.rs | 40 ++++++-------- postgres/src/lazy_pin.rs | 28 ++++++++++ postgres/src/lib.rs | 2 + postgres/src/test.rs | 53 +++++++++++++++++++ postgres/src/transaction.rs | 2 +- tokio-postgres/src/copy_out.rs | 3 ++ 9 files changed, 201 insertions(+), 33 deletions(-) create mode 100644 postgres/src/binary_copy.rs create mode 100644 postgres/src/lazy_pin.rs diff --git a/postgres/src/binary_copy.rs b/postgres/src/binary_copy.rs new file mode 100644 index 000000000..7828cb599 --- /dev/null +++ b/postgres/src/binary_copy.rs @@ -0,0 +1,92 @@ +//! Utilities for working with the PostgreSQL binary copy format. + +use crate::types::{ToSql, Type}; +use crate::{CopyInWriter, CopyOutReader, Error, Rt}; +use fallible_iterator::FallibleIterator; +use futures::StreamExt; +use std::pin::Pin; +#[doc(inline)] +pub use tokio_postgres::binary_copy::BinaryCopyOutRow; +use tokio_postgres::binary_copy::{self, BinaryCopyOutStream}; + +/// A type which serializes rows into the PostgreSQL binary copy format. 
+/// +/// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. +pub struct BinaryCopyInWriter<'a> { + runtime: Rt<'a>, + sink: Pin>, +} + +impl<'a> BinaryCopyInWriter<'a> { + /// Creates a new writer which will write rows of the provided types. + pub fn new(writer: CopyInWriter<'a>, types: &[Type]) -> BinaryCopyInWriter<'a> { + let stream = writer + .sink + .into_unpinned() + .expect("writer has already been written to"); + + BinaryCopyInWriter { + runtime: writer.runtime, + sink: Box::pin(binary_copy::BinaryCopyInWriter::new(stream, types)), + } + } + + /// Writes a single row. + /// + /// # Panics + /// + /// Panics if the number of values provided does not match the number expected. + pub fn write(&mut self, values: &[&(dyn ToSql + Sync)]) -> Result<(), Error> { + self.runtime.block_on(self.sink.as_mut().write(values)) + } + + /// A maximally-flexible version of `write`. + /// + /// # Panics + /// + /// Panics if the number of values provided does not match the number expected. + pub fn write_raw<'b, I>(&mut self, values: I) -> Result<(), Error> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + self.runtime.block_on(self.sink.as_mut().write_raw(values)) + } + + /// Completes the copy, returning the number of rows added. + /// + /// This method *must* be used to complete the copy process. If it is not, the copy will be aborted. + pub fn finish(mut self) -> Result { + self.runtime.block_on(self.sink.as_mut().finish()) + } +} + +/// An iterator of rows deserialized from the PostgreSQL binary copy format. +pub struct BinaryCopyOutIter<'a> { + runtime: Rt<'a>, + stream: Pin>, +} + +impl<'a> BinaryCopyOutIter<'a> { + /// Creates a new iterator from a raw copy out reader and the types of the columns being returned. + pub fn new(reader: CopyOutReader<'a>, types: &[Type]) -> BinaryCopyOutIter<'a> { + let stream = reader + .stream + .into_unpinned() + .expect("reader has already been read from"); + + BinaryCopyOutIter { + runtime: reader.runtime, + stream: Box::pin(BinaryCopyOutStream::new(stream, types)), + } + } +} + +impl FallibleIterator for BinaryCopyOutIter<'_> { + type Item = BinaryCopyOutRow; + type Error = Error; + + fn next(&mut self) -> Result, Error> { + self.runtime.block_on(self.stream.next()).transpose() + } +} diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 2621cbf70..3e69670dc 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -382,7 +382,7 @@ impl Client { T: ?Sized + ToStatement, { let stream = self.runtime.block_on(self.client.copy_out(query))?; - CopyOutReader::new(self.rt(), stream) + Ok(CopyOutReader::new(self.rt(), stream)) } /// Executes a sequence of SQL statements using the simple query protocol. diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs index 9a9e4899e..fc11818ab 100644 --- a/postgres/src/copy_in_writer.rs +++ b/postgres/src/copy_in_writer.rs @@ -1,17 +1,17 @@ +use crate::lazy_pin::LazyPin; use crate::Rt; use bytes::{Bytes, BytesMut}; use futures::SinkExt; use std::io; use std::io::Write; -use std::pin::Pin; use tokio_postgres::{CopyInSink, Error}; /// The writer returned by the `copy_in` method. /// /// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. 
pub struct CopyInWriter<'a> { - runtime: Rt<'a>, - sink: Pin>>, + pub(crate) runtime: Rt<'a>, + pub(crate) sink: LazyPin>, buf: BytesMut, } @@ -19,7 +19,7 @@ impl<'a> CopyInWriter<'a> { pub(crate) fn new(runtime: Rt<'a>, sink: CopyInSink) -> CopyInWriter<'a> { CopyInWriter { runtime, - sink: Box::pin(sink), + sink: LazyPin::new(sink), buf: BytesMut::new(), } } @@ -29,7 +29,7 @@ impl<'a> CopyInWriter<'a> { /// If this is not called, the copy will be aborted. pub fn finish(mut self) -> Result { self.flush_inner()?; - self.runtime.block_on(self.sink.as_mut().finish()) + self.runtime.block_on(self.sink.pinned().finish()) } fn flush_inner(&mut self) -> Result<(), Error> { @@ -38,7 +38,7 @@ impl<'a> CopyInWriter<'a> { } self.runtime - .block_on(self.sink.as_mut().send(self.buf.split().freeze())) + .block_on(self.sink.pinned().send(self.buf.split().freeze())) } } diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index c1bc2ff6e..9091e2200 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -1,34 +1,24 @@ +use crate::lazy_pin::LazyPin; use crate::Rt; use bytes::{Buf, Bytes}; use futures::StreamExt; -use std::io::{self, BufRead, Cursor, Read}; -use std::pin::Pin; -use tokio_postgres::{CopyOutStream, Error}; +use std::io::{self, BufRead, Read}; +use tokio_postgres::CopyOutStream; /// The reader returned by the `copy_out` method. pub struct CopyOutReader<'a> { - runtime: Rt<'a>, - stream: Pin>, - cur: Cursor, + pub(crate) runtime: Rt<'a>, + pub(crate) stream: LazyPin, + cur: Bytes, } impl<'a> CopyOutReader<'a> { - pub(crate) fn new( - mut runtime: Rt<'a>, - stream: CopyOutStream, - ) -> Result, Error> { - let mut stream = Box::pin(stream); - let cur = match runtime.block_on(stream.next()) { - Some(Ok(cur)) => cur, - Some(Err(e)) => return Err(e), - None => Bytes::new(), - }; - - Ok(CopyOutReader { + pub(crate) fn new(runtime: Rt<'a>, stream: CopyOutStream) -> CopyOutReader<'a> { + CopyOutReader { runtime, - stream, - cur: Cursor::new(cur), - }) + stream: LazyPin::new(stream), + cur: Bytes::new(), + } } } @@ -44,15 +34,15 @@ impl Read for CopyOutReader<'_> { impl BufRead for CopyOutReader<'_> { fn fill_buf(&mut self) -> io::Result<&[u8]> { - if self.cur.remaining() == 0 { - match self.runtime.block_on(self.stream.next()) { - Some(Ok(cur)) => self.cur = Cursor::new(cur), + if !self.cur.has_remaining() { + match self.runtime.block_on(self.stream.pinned().next()) { + Some(Ok(cur)) => self.cur = cur, Some(Err(e)) => return Err(io::Error::new(io::ErrorKind::Other, e)), None => {} }; } - Ok(Buf::bytes(&self.cur)) + Ok(self.cur.bytes()) } fn consume(&mut self, amt: usize) { diff --git a/postgres/src/lazy_pin.rs b/postgres/src/lazy_pin.rs new file mode 100644 index 000000000..a18b58b84 --- /dev/null +++ b/postgres/src/lazy_pin.rs @@ -0,0 +1,28 @@ +use std::pin::Pin; + +pub(crate) struct LazyPin { + value: Box, + pinned: bool, +} + +impl LazyPin { + pub fn new(value: T) -> LazyPin { + LazyPin { + value: Box::new(value), + pinned: false, + } + } + + pub fn pinned(&mut self) -> Pin<&mut T> { + self.pinned = true; + unsafe { Pin::new_unchecked(&mut *self.value) } + } + + pub fn into_unpinned(self) -> Option { + if self.pinned { + None + } else { + Some(*self.value) + } + } +} diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index bfe187647..af1125375 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -76,6 +76,8 @@ mod client; pub mod config; mod copy_in_writer; mod copy_out_reader; +mod lazy_pin; +pub mod binary_copy; mod row_iter; 
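// The invariant behind `LazyPin`, shown as a small illustration (it is `pub(crate)`, so
// this only compiles inside the crate; the `String` payload is just for the example).
// The value lives in a `Box`, so its address is stable, and once `pinned()` has handed
// out a `Pin<&mut T>` the `pinned` flag guarantees the value is never moved out again,
// which is what makes the `Pin::new_unchecked` above sound. `BinaryCopyInWriter::new`
// relies on the other half: a sink that was never pinned can still be recovered by
// value through `into_unpinned()`.
use crate::lazy_pin::LazyPin;

fn lazy_pin_invariant() {
    let never_pinned = LazyPin::new(String::from("fresh"));
    // Never pinned, so ownership can still be taken back.
    assert_eq!(never_pinned.into_unpinned().as_deref(), Some("fresh"));

    let mut pinned_once = LazyPin::new(String::from("pinned"));
    pinned_once.pinned().push_str("!");
    // After `pinned()` the value must stay put; `into_unpinned()` now refuses.
    assert!(pinned_once.into_unpinned().is_none());
}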
mod transaction; diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 845d5d694..2275d715a 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -3,6 +3,8 @@ use tokio_postgres::types::Type; use tokio_postgres::NoTls; use super::*; +use crate::binary_copy::{BinaryCopyInWriter, BinaryCopyOutIter}; +use fallible_iterator::FallibleIterator; #[test] fn prepare() { @@ -188,6 +190,31 @@ fn copy_in_abort() { assert_eq!(rows.len(), 0); } +#[test] +fn binary_copy_in() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .simple_query("CREATE TEMPORARY TABLE foo (id INT, name TEXT)") + .unwrap(); + + let writer = client.copy_in("COPY foo FROM stdin BINARY").unwrap(); + let mut writer = BinaryCopyInWriter::new(writer, &[Type::INT4, Type::TEXT]); + writer.write(&[&1i32, &"steven"]).unwrap(); + writer.write(&[&2i32, &"timothy"]).unwrap(); + writer.finish().unwrap(); + + let rows = client + .query("SELECT id, name FROM foo ORDER BY id", &[]) + .unwrap(); + + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[0].get::<_, &str>(1), "steven"); + assert_eq!(rows[1].get::<_, i32>(0), 2); + assert_eq!(rows[1].get::<_, &str>(1), "timothy"); +} + #[test] fn copy_out() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); @@ -209,6 +236,32 @@ fn copy_out() { client.simple_query("SELECT 1").unwrap(); } +#[test] +fn binary_copy_out() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .simple_query( + "CREATE TEMPORARY TABLE foo (id INT, name TEXT); + INSERT INTO foo (id, name) VALUES (1, 'steven'), (2, 'timothy');", + ) + .unwrap(); + + let reader = client + .copy_out("COPY foo (id, name) TO STDOUT BINARY") + .unwrap(); + let rows = BinaryCopyOutIter::new(reader, &[Type::INT4, Type::TEXT]) + .collect::>() + .unwrap(); + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].get::(0), 1); + assert_eq!(rows[0].get::<&str>(1), "steven"); + assert_eq!(rows[1].get::(0), 2); + assert_eq!(rows[1].get::<&str>(1), "timothy"); + + client.simple_query("SELECT 1").unwrap(); +} + #[test] fn portal() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 2b397ca52..010e8b62e 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -155,7 +155,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { let stream = self.runtime.block_on(self.transaction.copy_out(query))?; - CopyOutReader::new(self.rt(), stream) + Ok(CopyOutReader::new(self.rt(), stream)) } /// Like `Client::simple_query`. diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 62c1a1a4f..6467501ea 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -22,11 +22,13 @@ pub async fn copy_out(client: &InnerClient, statement: Statement) -> Result Result { let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + println!("a"); match responses.next().await? { Message::BindComplete => {} _ => return Err(Error::unexpected_message()), } + println!("b"); match responses.next().await? 
{ Message::CopyOutResponse(_) => {} _ => return Err(Error::unexpected_message()), @@ -50,6 +52,7 @@ impl Stream for CopyOutStream { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); + println!("c"); match ready!(this.responses.poll_next(cx)?) { Message::CopyData(body) => Poll::Ready(Some(Ok(body.into_bytes()))), Message::CopyDone => Poll::Ready(None), From 584aca4c1719462a6c8818e2b246c239f291acdc Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 15 Dec 2019 17:11:39 -0800 Subject: [PATCH 336/819] rustfmt --- postgres/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index af1125375..b2ff8454d 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -72,12 +72,12 @@ pub use crate::row_iter::RowIter; pub use crate::tls::NoTls; pub use crate::transaction::*; +pub mod binary_copy; mod client; pub mod config; mod copy_in_writer; mod copy_out_reader; mod lazy_pin; -pub mod binary_copy; mod row_iter; mod transaction; From 3c85532a7649496c4d06e5a9925f06af75f690dd Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 15 Dec 2019 17:34:11 -0800 Subject: [PATCH 337/819] Fix docs --- postgres/src/lib.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index b2ff8454d..a33354b98 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -36,15 +36,9 @@ //! //! # Implementation //! -//! This crate is a lightweight wrapper over tokio-postgres. The `tokio_postgres::Connection` is spawned onto an -//! executor, and the `tokio_postgres::Client` is wrapped in the `postgres::Client`, which simply waits on the futures -//! the nonblocking client creates. -//! -//! # Runtime -//! -//! A client can be constructed directly from a `tokio-postgres` client via a `From` implementation, but the `runtime` -//! Cargo feature (enabled by default) provides a more convenient interface. By default, connections will be spawned -//! onto a static tokio `Runtime`, but a custom `Executor` can also be used instead. +//! This crate is a lightweight wrapper over tokio-postgres. The `postgres::Client` is simply a wrapper around a +//! `tokio_postgres::Client` along side a tokio `Runtime`. The client simply blocks on the futures provided by the async +//! client. //! //! # SSL/TLS support //! From 309b647bdbf740283265942eb4711197b588e8e0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 15 Dec 2019 17:49:50 -0800 Subject: [PATCH 338/819] Publicly reexport fallible-iterator from postgres Closes #520 --- postgres/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index a33354b98..801eeb9c6 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -49,6 +49,7 @@ #![doc(html_root_url = "https://docs.rs/postgres/0.17")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] +pub use fallible_iterator; pub use tokio_postgres::{ error, row, tls, types, Column, Portal, SimpleQueryMessage, Socket, Statement, ToStatement, }; From d9a999a578a9574de478261dca845bbefb3772d8 Mon Sep 17 00:00:00 2001 From: "Michael P. 
Jung" Date: Mon, 16 Dec 2019 14:36:23 +0100 Subject: [PATCH 339/819] Add back logging of queries This fixes #492 --- tokio-postgres/src/query.rs | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 76d14e5cc..a821caf89 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -21,7 +21,17 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = encode(client, &statement, params)?; + let buf = if log::log_enabled!(log::Level::Debug) { + let params = params.into_iter().collect::>(); + log::debug!( + "executing statement {} with parameters: {:?}", + statement.name(), + params, + ); + encode(client, &statement, params)? + } else { + encode(client, &statement, params)? + }; let responses = start(client, buf).await?; Ok(RowStream { statement, @@ -59,7 +69,17 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = encode(client, &statement, params)?; + let buf = if log::log_enabled!(log::Level::Debug) { + let params = params.into_iter().collect::>(); + log::debug!( + "executing statement {} with parameters: {:?}", + statement.name(), + params, + ); + encode(client, &statement, params)? + } else { + encode(client, &statement, params)? + }; let mut responses = start(client, buf).await?; loop { From 8ee231f803516ad6612adcd4dbc6c79515fd742e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 17 Dec 2019 18:11:40 -0800 Subject: [PATCH 340/819] Add remaining log statements --- tokio-postgres/src/copy_in.rs | 3 +++ tokio-postgres/src/copy_out.rs | 3 +++ tokio-postgres/src/prepare.rs | 7 +++++++ tokio-postgres/src/query.rs | 9 +++++---- tokio-postgres/src/simple_query.rs | 5 +++++ 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index f9b8f5999..fc712f6db 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -7,6 +7,7 @@ use bytes::{Buf, BufMut, BytesMut}; use futures::channel::mpsc; use futures::future; use futures::{ready, Sink, SinkExt, Stream, StreamExt}; +use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -199,6 +200,8 @@ pub async fn copy_in(client: &InnerClient, statement: Statement) -> Result Result { + debug!("executing copy out statement {}", statement.name()); + let buf = query::encode(client, &statement, slice_iter(&[]))?; let responses = start(client, buf).await?; Ok(CopyOutStream { diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 57927b8f4..49397debf 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -8,6 +8,7 @@ use crate::{Column, Error, Statement}; use bytes::Bytes; use fallible_iterator::FallibleIterator; use futures::{pin_mut, TryStreamExt}; +use log::debug; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::future::Future; @@ -111,6 +112,12 @@ fn prepare_rec<'a>( } fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Result { + if types.is_empty() { + debug!("preparing query {}: {}", name, query); + } else { + debug!("preparing query {} with types {:?}: {}", name, types, query); + } + client.with_buf(|buf| { frontend::parse(name, query, types.iter().map(Type::oid), buf).map_err(Error::encode)?; frontend::describe(b'S', &name, buf).map_err(Error::encode)?; diff --git a/tokio-postgres/src/query.rs 
b/tokio-postgres/src/query.rs index a821caf89..7792f0a8a 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -5,6 +5,7 @@ use crate::types::{IsNull, ToSql}; use crate::{Error, Portal, Row, Statement}; use bytes::{Bytes, BytesMut}; use futures::{ready, Stream}; +use log::{debug, log_enabled, Level}; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -21,9 +22,9 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = if log::log_enabled!(log::Level::Debug) { + let buf = if log_enabled!(Level::Debug) { let params = params.into_iter().collect::>(); - log::debug!( + debug!( "executing statement {} with parameters: {:?}", statement.name(), params, @@ -69,9 +70,9 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let buf = if log::log_enabled!(log::Level::Debug) { + let buf = if log_enabled!(Level::Debug) { let params = params.into_iter().collect::>(); - log::debug!( + debug!( "executing statement {} with parameters: {:?}", statement.name(), params, diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 04899fb9a..82ac35664 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -5,6 +5,7 @@ use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use bytes::Bytes; use fallible_iterator::FallibleIterator; use futures::{ready, Stream}; +use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -14,6 +15,8 @@ use std::sync::Arc; use std::task::{Context, Poll}; pub async fn simple_query(client: &InnerClient, query: &str) -> Result { + debug!("executing simple query: {}", query); + let buf = encode(client, query)?; let responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; @@ -25,6 +28,8 @@ pub async fn simple_query(client: &InnerClient, query: &str) -> Result Result<(), Error> { + debug!("executing statement batch: {}", query); + let buf = encode(client, query)?; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; From 37d0a83434db8be2671b6fef541298458e7dd035 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 19 Dec 2019 16:41:26 -0800 Subject: [PATCH 341/819] remove uuid 0.7 support --- postgres-types/Cargo.toml | 2 -- postgres-types/src/lib.rs | 2 -- postgres-types/src/uuid_07.rs | 25 ---------------------- postgres/Cargo.toml | 1 - tokio-postgres/Cargo.toml | 1 - tokio-postgres/src/to_statement.rs | 2 +- tokio-postgres/tests/test/types/mod.rs | 2 -- tokio-postgres/tests/test/types/uuid_07.rs | 18 ---------------- 8 files changed, 1 insertion(+), 52 deletions(-) delete mode 100644 postgres-types/src/uuid_07.rs delete mode 100644 tokio-postgres/tests/test/types/uuid_07.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 0042b6d25..9df512458 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -17,7 +17,6 @@ with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] with-geo-types-0_4 = ["geo-types-04"] with-serde_json-1 = ["serde-1", "serde_json-1"] -with-uuid-0_7 = ["uuid-07"] with-uuid-0_8 = ["uuid-08"] [dependencies] @@ -32,5 +31,4 @@ eui48-04 = { version = "0.4", package = "eui48", optional = true } geo-types-04 = { version = "0.4", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", 
optional = true } -uuid-07 = { version = "0.7", package = "uuid", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index db4aef4c6..fc9888a5a 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -198,8 +198,6 @@ mod eui48_04; mod geo_types_04; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; -#[cfg(feature = "with-uuid-0_7")] -mod uuid_07; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; diff --git a/postgres-types/src/uuid_07.rs b/postgres-types/src/uuid_07.rs deleted file mode 100644 index 60f85a52a..000000000 --- a/postgres-types/src/uuid_07.rs +++ /dev/null @@ -1,25 +0,0 @@ -use bytes::BytesMut; -use postgres_protocol::types; -use std::error::Error; -use uuid_07::Uuid; - -use crate::{FromSql, IsNull, ToSql, Type}; - -impl<'a> FromSql<'a> for Uuid { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { - let bytes = types::uuid_from_sql(raw)?; - Ok(Uuid::from_bytes(bytes)) - } - - accepts!(UUID); -} - -impl ToSql for Uuid { - fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - types::uuid_to_sql(*self.as_bytes(), w); - Ok(IsNull::No) - } - - accepts!(UUID); - to_sql_checked!(); -} diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index e766aa8ee..178c46541 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -26,7 +26,6 @@ with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] with-geo-types-0_4 = ["tokio-postgres/with-geo-types-0_4"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] -with-uuid-0_7 = ["tokio-postgres/with-uuid-0_7"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] [dependencies] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index d48b9e652..df2707fcc 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -32,7 +32,6 @@ with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] with-geo-types-0_4 = ["postgres-types/with-geo-types-0_4"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] -with-uuid-0_7 = ["postgres-types/with-uuid-0_7"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] [dependencies] diff --git a/tokio-postgres/src/to_statement.rs b/tokio-postgres/src/to_statement.rs index 3b4026e56..3ff82493c 100644 --- a/tokio-postgres/src/to_statement.rs +++ b/tokio-postgres/src/to_statement.rs @@ -27,7 +27,7 @@ mod private { /// was prepared previously. /// /// This trait is "sealed" and cannot be implemented by anything outside this crate. 
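// "Sealed" above is the standard sealed-supertrait pattern: the public trait has a
// supertrait living in a private module, so downstream crates can name `ToStatement`
// in bounds but can never implement it themselves. A generic illustration of the
// pattern (the trait and method names here are invented, not the crate's):
mod private {
    pub trait Sealed {}
}

/// Nameable everywhere, implementable only in this crate, because `private::Sealed`
/// cannot be reached from the outside.
pub trait QueryText: private::Sealed {
    fn text(&self) -> &str;
}

impl private::Sealed for str {}

impl QueryText for str {
    fn text(&self) -> &str {
        self
    }
}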
-pub trait ToStatement: private::Sealed { +pub trait ToStatement: Sealed { #[doc(hidden)] fn __convert(&self) -> ToStatementType<'_>; } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 8d411d54e..df6a2e934 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -22,8 +22,6 @@ mod eui48_04; mod geo_010; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; -#[cfg(feature = "with-uuid-0_7")] -mod uuid_07; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; diff --git a/tokio-postgres/tests/test/types/uuid_07.rs b/tokio-postgres/tests/test/types/uuid_07.rs deleted file mode 100644 index 563cfa4cb..000000000 --- a/tokio-postgres/tests/test/types/uuid_07.rs +++ /dev/null @@ -1,18 +0,0 @@ -use uuid_07::Uuid; - -use crate::types::test_type; - -#[tokio::test] -async fn test_uuid_params() { - test_type( - "UUID", - &[ - ( - Some(Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap()), - "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", - ), - (None, "NULL"), - ], - ) - .await -} From 3592e0553e5a997cb20bc3ac4d968bcd7248548a Mon Sep 17 00:00:00 2001 From: Colin Maxfield Date: Fri, 20 Dec 2019 20:35:17 -0500 Subject: [PATCH 342/819] Adding in generic connection trait back to the lib --- postgres/src/client.rs | 17 ++++++++++++++++- postgres/src/generic_connection.rs | 18 ++++++++++++++++++ postgres/src/lib.rs | 2 ++ postgres/src/transaction.rs | 17 ++++++++++++++++- 4 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 postgres/src/generic_connection.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 3e69670dc..c4a18b79e 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,4 +1,4 @@ -use crate::{Config, CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction}; +use crate::{Config, CopyInWriter, CopyOutReader, GenericConnection, RowIter, Statement, ToStatement, Transaction}; use std::ops::{Deref, DerefMut}; use tokio::runtime::Runtime; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; @@ -450,3 +450,18 @@ impl Client { self.client.is_closed() } } + +impl GenericConnection for Client { + fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result { + self.execute(query, params) + } + fn query(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result, Error> { + self.query(query, params) + } + fn prepare(&mut self, query: &str) -> Result { + self.prepare(query) + } + fn transaction(&mut self) -> Result, Error> { + self.transaction() + } +} diff --git a/postgres/src/generic_connection.rs b/postgres/src/generic_connection.rs new file mode 100644 index 000000000..fcf1755eb --- /dev/null +++ b/postgres/src/generic_connection.rs @@ -0,0 +1,18 @@ +use crate::{Statement, Transaction}; +use tokio_postgres::types::{ToSql}; +use tokio_postgres::{Error, Row}; + +/// A trait allowing abstraction over connections and transactions +pub trait GenericConnection { + /// Like `Client::execute`. + fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result; + + /// Like `Client::query`. + fn query(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result, Error>; + + /// Like `Client::prepare`. + fn prepare(&mut self, query: &str) -> Result; + + /// Like `Client::transaction`. 
+ fn transaction(&mut self) -> Result, Error>; +} diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 801eeb9c6..0831b80aa 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -60,6 +60,7 @@ pub use crate::copy_in_writer::CopyInWriter; pub use crate::copy_out_reader::CopyOutReader; #[doc(no_inline)] pub use crate::error::Error; +pub use crate::generic_connection::GenericConnection; #[doc(no_inline)] pub use crate::row::{Row, SimpleQueryRow}; pub use crate::row_iter::RowIter; @@ -72,6 +73,7 @@ mod client; pub mod config; mod copy_in_writer; mod copy_out_reader; +mod generic_connection; mod lazy_pin; mod row_iter; mod transaction; diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 010e8b62e..f32880295 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,4 +1,4 @@ -use crate::{CopyInWriter, CopyOutReader, Portal, RowIter, Rt, Statement, ToStatement}; +use crate::{CopyInWriter, CopyOutReader, GenericConnection, Portal, RowIter, Rt, Statement, ToStatement}; use tokio::runtime::Runtime; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; @@ -177,3 +177,18 @@ impl<'a> Transaction<'a> { }) } } + +impl<'a> GenericConnection for Transaction<'a> { + fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result { + self.execute(query, params) + } + fn query(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result, Error> { + self.query(query, params) + } + fn prepare(&mut self, query: &str) -> Result { + self.prepare(query) + } + fn transaction(&mut self) -> Result, Error> { + self.transaction() + } +} From e8aef6579e51155bde223c8a26ee6f8a4b3d6bd9 Mon Sep 17 00:00:00 2001 From: Colin Maxfield Date: Fri, 20 Dec 2019 23:28:32 -0500 Subject: [PATCH 343/819] Fixing fmt on the files I changed, forgot to check prior to previous commit --- postgres/src/client.rs | 5 ++++- postgres/src/generic_connection.rs | 18 +++++++++--------- postgres/src/transaction.rs | 4 +++- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index c4a18b79e..b24a568ab 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,4 +1,7 @@ -use crate::{Config, CopyInWriter, CopyOutReader, GenericConnection, RowIter, Statement, ToStatement, Transaction}; +use crate::{ + Config, CopyInWriter, CopyOutReader, GenericConnection, RowIter, Statement, ToStatement, + Transaction, +}; use std::ops::{Deref, DerefMut}; use tokio::runtime::Runtime; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; diff --git a/postgres/src/generic_connection.rs b/postgres/src/generic_connection.rs index fcf1755eb..47df4c20a 100644 --- a/postgres/src/generic_connection.rs +++ b/postgres/src/generic_connection.rs @@ -1,18 +1,18 @@ use crate::{Statement, Transaction}; -use tokio_postgres::types::{ToSql}; +use tokio_postgres::types::ToSql; use tokio_postgres::{Error, Row}; /// A trait allowing abstraction over connections and transactions pub trait GenericConnection { - /// Like `Client::execute`. - fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result; + /// Like `Client::execute`. + fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result; - /// Like `Client::query`. - fn query(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result, Error>; + /// Like `Client::query`. + fn query(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result, Error>; - /// Like `Client::prepare`. 
- fn prepare(&mut self, query: &str) -> Result; + /// Like `Client::prepare`. + fn prepare(&mut self, query: &str) -> Result; - /// Like `Client::transaction`. - fn transaction(&mut self) -> Result, Error>; + /// Like `Client::transaction`. + fn transaction(&mut self) -> Result, Error>; } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index f32880295..c5639387d 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,4 +1,6 @@ -use crate::{CopyInWriter, CopyOutReader, GenericConnection, Portal, RowIter, Rt, Statement, ToStatement}; +use crate::{ + CopyInWriter, CopyOutReader, GenericConnection, Portal, RowIter, Rt, Statement, ToStatement, +}; use tokio::runtime::Runtime; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; From 869f2d7e47ac28ad0a7ceb8b34160c2db1edf82b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 21 Dec 2019 04:21:14 -0800 Subject: [PATCH 344/819] Remove stray dev dependency --- postgres/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 178c46541..3fa33007b 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -39,4 +39,3 @@ log = "0.4" [dev-dependencies] criterion = "0.3" -tokio = "0.2" From 2615441c7dc3d160be27731b7e5872b87cc2be5b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 21 Dec 2019 18:14:28 -0800 Subject: [PATCH 345/819] Doc cleanup and "real" extensible enums --- .circleci/config.yml | 2 +- postgres-protocol/src/message/backend.rs | 3 +-- postgres-types/src/lib.rs | 3 +-- tokio-postgres/src/config.rs | 13 +++++-------- tokio-postgres/src/connect_raw.rs | 1 - tokio-postgres/src/connect_tls.rs | 1 - tokio-postgres/src/lib.rs | 15 +++++---------- tokio-postgres/src/transaction.rs | 10 ++++++---- 8 files changed, 19 insertions(+), 29 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 19d845ba4..acaec4a32 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ version: 2 jobs: build: docker: - - image: rust:1.39.0 + - image: rust:1.40.0 environment: RUSTFLAGS: -D warnings - image: sfackler/rust-postgres-test:6 diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 7c704511e..68b5aa6e5 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -72,6 +72,7 @@ impl Header { } /// An enum representing Postgres backend messages. +#[non_exhaustive] pub enum Message { AuthenticationCleartextPassword, AuthenticationGss, @@ -104,8 +105,6 @@ pub enum Message { PortalSuspended, ReadyForQuery(ReadyForQueryBody), RowDescription(RowDescriptionBody), - #[doc(hidden)] - __ForExtensibility, } impl Message { diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index fc9888a5a..0b1a0f2c4 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -263,6 +263,7 @@ impl Type { /// Represents the kind of a Postgres type. #[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[non_exhaustive] pub enum Kind { /// A simple type like `VARCHAR` or `INTEGER`. Simple, @@ -278,8 +279,6 @@ pub enum Kind { Domain(Type), /// A composite type along with information about its fields. Composite(Vec), - #[doc(hidden)] - __ForExtensibility, } /// Information about a field of a composite type. 
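// What `#[non_exhaustive]` buys over the old hidden `__ForExtensibility` variant: the
// compiler itself forces downstream crates to keep a wildcard arm, so new variants can
// be added later without a semver break, while code inside the defining crate can still
// match exhaustively. An illustrative downstream match (the strings are made up; `Kind`
// and its variants are the real ones from above):
fn describe(kind: &postgres_types::Kind) -> &'static str {
    use postgres_types::Kind;
    match kind {
        Kind::Simple => "simple",
        Kind::Enum(_) => "enum",
        Kind::Array(_) => "array",
        // Mandatory outside postgres-types: the enum is #[non_exhaustive], so a
        // catch-all arm is required even if every current variant were listed.
        _ => "other",
    }
}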
diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 0dc6d5bf8..0cba8ec8d 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -24,17 +24,17 @@ use tokio::io::{AsyncRead, AsyncWrite}; /// Properties required of a session. #[derive(Debug, Copy, Clone, PartialEq)] +#[non_exhaustive] pub enum TargetSessionAttrs { /// No special properties are required. Any, /// The session must allow writes. ReadWrite, - #[doc(hidden)] - __NonExhaustive, } /// TLS configuration. #[derive(Debug, Copy, Clone, PartialEq)] +#[non_exhaustive] pub enum SslMode { /// Do not use TLS. Disable, @@ -42,12 +42,11 @@ pub enum SslMode { Prefer, /// Require the use of TLS. Require, - #[doc(hidden)] - __NonExhaustive, } /// Channel binding configuration. #[derive(Debug, Copy, Clone, PartialEq)] +#[non_exhaustive] pub enum ChannelBinding { /// Do not use channel binding. Disable, @@ -55,8 +54,6 @@ pub enum ChannelBinding { Prefer, /// Require the use of channel binding. Require, - #[doc(hidden)] - __NonExhaustive, } #[derive(Debug, Clone, PartialEq)] @@ -121,7 +118,7 @@ pub(crate) enum Host { /// # Url /// /// This format resembles a URL with a scheme of either `postgres://` or `postgresql://`. All components are optional, -/// and the format accept query parameters for all of the key-value pairs described in the section above. Multiple +/// and the format accepts query parameters for all of the key-value pairs described in the section above. Multiple /// host/port pairs can be comma-separated. Unix socket paths in the host section of the URL should be percent-encoded, /// as the path component of the URL specifies the database name. /// @@ -425,7 +422,7 @@ impl Config { /// Connects to a PostgreSQL database over an arbitrary stream. /// - /// All of the settings other than `user`, `password`, `dbname`, `options`, and `application` name are ignored. + /// All of the settings other than `user`, `password`, `dbname`, `options`, and `application_name` name are ignored. pub async fn connect_raw( &self, stream: S, diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index f9149dfce..7ccfe9b5c 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -197,7 +197,6 @@ fn can_skip_channel_binding(config: &Config) -> Result<(), Error> { config::ChannelBinding::Require => Err(Error::authentication( "server did not use channel binding".into(), )), - config::ChannelBinding::__NonExhaustive => unreachable!(), } } diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs index 03aaa0bc0..5ef21ac5c 100644 --- a/tokio-postgres/src/connect_tls.rs +++ b/tokio-postgres/src/connect_tls.rs @@ -22,7 +22,6 @@ where return Ok(MaybeTlsStream::Raw(stream)) } SslMode::Prefer | SslMode::Require => {} - SslMode::__NonExhaustive => unreachable!(), } let mut buf = BytesMut::new(); diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 70fee32f9..dd0d8aba6 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -21,15 +21,12 @@ //! } //! }); //! -//! // Now we can prepare a simple statement that just returns its parameter. -//! let stmt = client.prepare("SELECT $1::TEXT").await?; -//! -//! // And then execute it, returning a list of the resulting rows. +//! // Now we can execute a simple statement that just returns its parameter. //! let rows = client -//! .query(&stmt, &[&"hello world"]) +//! .query("SELECT $1::TEXT", &[&"hello world"]) //! .await?; //! -//! 
// Now we can check that we got back the same string we sent over. +//! // And then check that we got back the same string we sent over. //! let value: &str = rows[0].get(0); //! assert_eq!(value, "hello world"); //! @@ -201,6 +198,7 @@ impl Notification { /// An asynchronous message from the server. #[allow(clippy::large_enum_variant)] +#[non_exhaustive] pub enum AsyncMessage { /// A notice. /// @@ -210,11 +208,10 @@ pub enum AsyncMessage { /// /// Connections can subscribe to notifications with the `LISTEN` command. Notification(Notification), - #[doc(hidden)] - __NonExhaustive, } /// Message returned by the `SimpleQuery` stream. +#[non_exhaustive] pub enum SimpleQueryMessage { /// A row of data. Row(SimpleQueryRow), @@ -222,8 +219,6 @@ pub enum SimpleQueryMessage { /// /// The number of rows modified or selected is returned. CommandComplete(u64), - #[doc(hidden)] - __NonExhaustive, } fn slice_iter<'a>( diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index aaf7efd85..c97d5f9f4 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -184,13 +184,13 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.bind_iter(statement, slice_iter(params)).await + self.bind_raw(statement, slice_iter(params)).await } - /// Like [`bind`], but takes an iterator of parameters rather than a slice. + /// A maximally flexible version of [`bind`]. /// /// [`bind`]: #method.bind - pub async fn bind_iter<'b, T, I>(&self, statement: &T, params: I) -> Result + pub async fn bind_raw<'b, T, I>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, I: IntoIterator, @@ -211,7 +211,9 @@ impl<'a> Transaction<'a> { .await } - /// The maximally flexible version of `query_portal`. + /// The maximally flexible version of [`query_portal`]. + /// + /// [`query_portal`]: #method.query_portal pub async fn query_portal_raw( &self, portal: &Portal, From 188e5eb6569c4a683ea460f5d86a8c602e38fb43 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 23 Dec 2019 22:59:25 +0000 Subject: [PATCH 346/819] Update base64 requirement from 0.10 to 0.11 Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version. - [Release notes](https://github.com/marshallpierce/rust-base64/releases) - [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md) - [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.10.0...v0.11.0) Signed-off-by: dependabot-preview[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 7ad9d4996..b04214f28 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/sfackler/rust-postgres-protocol" readme = "../README.md" [dependencies] -base64 = "0.10" +base64 = "0.11" byteorder = "1.0" bytes = "0.5" fallible-iterator = "0.2" From 9b443da418c482bb3f4e0e265bb312ad59dea2c4 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 23 Dec 2019 22:59:56 +0000 Subject: [PATCH 347/819] Update md5 requirement from 0.6 to 0.7 Updates the requirements on [md5](https://github.com/stainless-steel/md5) to permit the latest version. 
- [Release notes](https://github.com/stainless-steel/md5/releases) - [Commits](https://github.com/stainless-steel/md5/commits) Signed-off-by: dependabot-preview[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 7ad9d4996..4705e97d1 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -15,7 +15,7 @@ bytes = "0.5" fallible-iterator = "0.2" generic-array = "0.13" hmac = "0.7" -md5 = "0.6" +md5 = "0.7" memchr = "2.0" rand = "0.7" sha2 = "0.8" From 1c5ceebe8a6f4fbd8d494e7e145fa53063db46fa Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Dec 2019 15:21:52 -0800 Subject: [PATCH 348/819] Release postgres-protocol v0.5.0 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 400c27936..375465b22 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.0 - 2019-12-23 + +### Changed + +* `frontend::Message` is now a true non-exhaustive enum. + ## v0.5.0-alpha.2 - 2019-11-27 ### Changed diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 7331d4e97..155cf62a1 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.5.0-alpha.2" +version = "0.5.0" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 9df512458..504abcd25 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -22,7 +22,7 @@ with-uuid-0_8 = ["uuid-08"] [dependencies] bytes = "0.5" fallible-iterator = "0.2" -postgres-protocol = { version = "=0.5.0-alpha.2", path = "../postgres-protocol" } +postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-derive = { version = "=0.4.0-alpha.1", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index df2707fcc..98e054f54 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -44,7 +44,7 @@ parking_lot = "0.10" percent-encoding = "2.0" pin-project-lite = "0.1" phf = "0.8" -postgres-protocol = { version = "=0.5.0-alpha.2", path = "../postgres-protocol" } +postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-types = { version = "=0.1.0-alpha.2", path = "../postgres-types" } tokio = { version = "0.2", features = ["io-util"] } tokio-util = { version = "0.2", features = ["codec"] } From ca80f647e81235d54ff2a0de0d32ac57676321c9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Dec 2019 15:27:41 -0800 Subject: [PATCH 349/819] Release postgres-derive v0.4.0 --- postgres-derive/CHANGELOG.md | 6 +++++- postgres-derive/Cargo.toml | 2 +- postgres-types/Cargo.toml | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md index 79de77480..354f6f277 100644 --- a/postgres-derive/CHANGELOG.md +++ b/postgres-derive/CHANGELOG.md @@ -1,5 +1,9 @@ # Change Log -## v0.1.0-alpha.1 - 2019-10-14 +## v0.4.0 - 2019-12-23 + +No changes + +## v0.4.0-alpha.1 - 
2019-10-14 * Initial release diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 0a9c2e678..293c294a0 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-derive" -version = "0.4.0-alpha.1" +version = "0.4.0" authors = ["Steven Fackler "] license = "MIT/Apache-2.0" edition = "2018" diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 504abcd25..b3221fbfb 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -23,7 +23,7 @@ with-uuid-0_8 = ["uuid-08"] bytes = "0.5" fallible-iterator = "0.2" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } -postgres-derive = { version = "=0.4.0-alpha.1", optional = true, path = "../postgres-derive" } +postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } From 245ccb7babd4688a39334fd0885499b57e65f5fa Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Dec 2019 15:31:41 -0800 Subject: [PATCH 350/819] Release postgres-types v0.1.0 --- postgres-types/CHANGELOG.md | 14 ++++++++++++++ postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 071a2e65e..1569598eb 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,19 @@ # Change Log +## v0.1.0 - 2019-12-23 + +### Changed + +* `Kind` is now a true non-exhaustive enum. + +### Removed + +* Removed `uuid` 0.7 support. + +### Added + +* Added a `Hash` implementation for `Type`. + ## v0.1.0-alpha.2 - 2019-11-27 ### Changed diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index b3221fbfb..4add13815 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.1.0-alpha.2" +version = "0.1.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 98e054f54..a36ab9db0 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -45,7 +45,7 @@ percent-encoding = "2.0" pin-project-lite = "0.1" phf = "0.8" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } -postgres-types = { version = "=0.1.0-alpha.2", path = "../postgres-types" } +postgres-types = { version = "0.1.0", path = "../postgres-types" } tokio = { version = "0.2", features = ["io-util"] } tokio-util = { version = "0.2", features = ["codec"] } From 1f773f08e32a7f2e3820d03586b3db8ccd6a894b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Dec 2019 15:40:49 -0800 Subject: [PATCH 351/819] Release tokio-postgres v0.5.0 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/Cargo.toml | 2 +- tokio-postgres/CHANGELOG.md | 20 ++++++++++++++++++++ tokio-postgres/Cargo.toml | 2 +- 5 files changed, 24 insertions(+), 4 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 12d37c40c..188108fb2 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -21,7 +21,7 @@ futures = "0.3" native-tls = "0.2" tokio = "0.2" tokio-tls = "0.3" -tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.5.0", 
path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = { version = "0.2", features = ["full"] } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 8165b89db..faa865470 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -21,7 +21,7 @@ futures = "0.3" openssl = "0.10" tokio = "0.2" tokio-openssl = "0.4" -tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = { version = "0.2", features = ["full"] } diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 3fa33007b..94b86e553 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -32,7 +32,7 @@ with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "=0.5.0-alpha.2", path = "../tokio-postgres" } +tokio-postgres = { version = "0.5.0", path = "../tokio-postgres" } tokio = { version = "0.2", features = ["rt-core"] } log = "0.4" diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 85f8bbdde..a261b7d1d 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,25 @@ # Change Log +## v0.5.0 - 2019-12-23 + +### Changed + +* `Client::copy_in` now returns a `Sink` rather than taking in a `Stream`. +* `CopyStream` has been renamed to `CopyOutStream`. +* `Client::copy_in` and `Client::copy_out` no longer take query parameters as PostgreSQL doesn't support parameters in + COPY queries. +* `TargetSessionAttrs`, `SslMode`, and `ChannelBinding` are now true non-exhaustive enums. + +### Added + +* Added `Client::query_opt` for queries expected to return zero or one rows. +* Added binary copy format support to the `binary_copy` module. +* Added back query logging. + +### Removed + +* Removed `uuid` 0.7 support. 
+ ## v0.5.0-alpha.2 - 2019-11-27 ### Changed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index a36ab9db0..5e10aee59 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.5.0-alpha.2" +version = "0.5.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 69412cf7e37a23dab5ccf64c1464200e1826cc03 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Dec 2019 15:52:16 -0800 Subject: [PATCH 352/819] Released postgres v0.17.0 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/CHANGELOG.md | 21 +++++++++++++++++++++ postgres/Cargo.toml | 2 +- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 188108fb2..d6b0bd8b9 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -25,4 +25,4 @@ tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-featur [dev-dependencies] tokio = { version = "0.2", features = ["full"] } -postgres = { version = "=0.17.0-alpha.2", path = "../postgres" } +postgres = { version = "0.17.0", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index faa865470..b15b6691a 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -25,4 +25,4 @@ tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-featur [dev-dependencies] tokio = { version = "0.2", features = ["full"] } -postgres = { version = "=0.17.0-alpha.2", path = "../postgres" } +postgres = { version = "0.17.0", path = "../postgres" } diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 11693ca5e..686830db1 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,26 @@ # Change Log +## v0.17.0 - 2019-12-23 + +### Changed + +* Each `Client` now has its own non-threaded tokio `Runtime` rather than sharing a global threaded `Runtime`. This + significantly improves performance by minimizing context switches and cross-thread synchronization. +* `Client::copy_in` now returns a writer rather than taking in a reader. +* `Client::query_raw` now returns a named type. +* `Client::copy_in` and `Client::copy_out` no longer take query parameters as PostgreSQL doesn't support them in COPY + queries. + +### Removed + +* Removed support for `uuid` 0.7. + +### Added + +* Added `Client::query_opt` for queries that are expected to return zero or one rows. +* Added binary copy support in the `binary_copy` module. +* The `fallible-iterator` crate is now publicly reexported. 
+ ## v0.17.0-alpha.2 - 2019-11-27 ### Changed diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 94b86e553..41d500325 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.17.0-alpha.2" +version = "0.17.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From f3cf1de93e531200741f7066e49644098e85f4b0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Dec 2019 15:57:28 -0800 Subject: [PATCH 353/819] Release postgres-openssl v0.3.0 --- postgres-openssl/CHANGELOG.md | 6 ++++++ postgres-openssl/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md index d1a32fbfb..45a1bd065 100644 --- a/postgres-openssl/CHANGELOG.md +++ b/postgres-openssl/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.3.0 - 2019-12-23 + +### Changed + +* Upgraded to `tokio-postgres` 0.5. + ## v0.3.0-alpha.2 - 2019-11-27 ### Changed diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index b15b6691a..a3c9f65fa 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-openssl" -version = "0.3.0-alpha.2" +version = "0.3.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 7413ffb793ccaf0089be22cef70bb4f38af3213c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Dec 2019 15:59:17 -0800 Subject: [PATCH 354/819] Release postgres-native-tls v0.3.0 --- postgres-native-tls/CHANGELOG.md | 6 ++++++ postgres-native-tls/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index 6bd09c065..fd9180b3d 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.3.0 - 2019-12-23 + +### Changed + +* Upgraded to `tokio-postgres` 0.5. + ## v0.3.0-alpha.2 - 2019-11-27 ### Changed diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index d6b0bd8b9..e2d60d1fa 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-native-tls" -version = "0.3.0-alpha.2" +version = "0.3.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 75a67fd06bb3cec6d702e36f55a645c943be5874 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 23 Dec 2019 16:12:28 -0800 Subject: [PATCH 355/819] Remove unneeded generic-array dependency --- postgres-protocol/Cargo.toml | 1 - postgres-protocol/src/authentication/sasl.rs | 12 +++++------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 155cf62a1..ae45eb63d 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -13,7 +13,6 @@ base64 = "0.11" byteorder = "1.0" bytes = "0.5" fallible-iterator = "0.2" -generic-array = "0.13" hmac = "0.7" md5 = "0.7" memchr = "2.0" diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index bcd2c4b61..af458bbaf 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -1,7 +1,5 @@ //! SASL-based authentication support. 
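// For reference, the `hi` function touched below is the Hi(str, salt, i) operation from
// RFC 5802 (SCRAM), i.e. PBKDF2 with HMAC-SHA-256 producing a single output block:
//
//     U1 = HMAC(str, salt || INT(1))
//     Un = HMAC(str, U(n-1))
//     Hi = U1 XOR U2 XOR ... XOR Ui
//
// With SHA-256 every Un is exactly 32 bytes, which is why the return type can become the
// plain array `[u8; 32]` and the `generic-array` dependency can go away.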
-use generic_array::typenum::U32; -use generic_array::GenericArray; use hmac::{Hmac, Mac}; use rand::{self, Rng}; use sha2::{Digest, Sha256}; @@ -33,13 +31,13 @@ fn normalize(pass: &[u8]) -> Vec { } } -fn hi(str: &[u8], salt: &[u8], i: u32) -> GenericArray { +fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { let mut hmac = Hmac::::new_varkey(str).expect("HMAC is able to accept all key sizes"); hmac.input(salt); hmac.input(&[0, 0, 0, 1]); let mut prev = hmac.result().code(); - let mut hi = GenericArray::::clone_from_slice(&prev); + let mut hi = prev; for _ in 1..i { let mut hmac = Hmac::::new_varkey(str).expect("already checked above"); @@ -51,7 +49,7 @@ fn hi(str: &[u8], salt: &[u8], i: u32) -> GenericArray { } } - hi + hi.into() } enum ChannelBindingInner { @@ -103,7 +101,7 @@ enum State { channel_binding: ChannelBinding, }, Finish { - salted_password: GenericArray, + salted_password: [u8; 32], auth_message: String, }, Done, @@ -220,7 +218,7 @@ impl ScramSha256 { hmac.input(auth_message.as_bytes()); let client_signature = hmac.result(); - let mut client_proof = GenericArray::::clone_from_slice(&client_key); + let mut client_proof = client_key; for (proof, signature) in client_proof.iter_mut().zip(client_signature.code()) { *proof ^= signature; } From a8fa2a42c01bcf1b9aee54adb0b7f0aa81f552b7 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 24 Dec 2019 12:43:23 -0800 Subject: [PATCH 356/819] Use batch_execute in example --- postgres/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 801eeb9c6..42b28f315 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -8,7 +8,7 @@ //! # fn main() -> Result<(), postgres::Error> { //! let mut client = Client::connect("host=localhost user=postgres", NoTls)?; //! -//! client.simple_query(" +//! client.batch_execute(" //! CREATE TABLE person ( //! id SERIAL PRIMARY KEY, //! name TEXT NOT NULL, From 6d18d7955e344f9b9fae7b305ce886eef9b7f256 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 25 Dec 2019 11:12:34 -0500 Subject: [PATCH 357/819] Remove debugging printlns from copy out protocol --- tokio-postgres/src/copy_out.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index fca831886..52691b963 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -25,13 +25,11 @@ pub async fn copy_out(client: &InnerClient, statement: Statement) -> Result Result { let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - println!("a"); match responses.next().await? { Message::BindComplete => {} _ => return Err(Error::unexpected_message()), } - println!("b"); match responses.next().await? { Message::CopyOutResponse(_) => {} _ => return Err(Error::unexpected_message()), @@ -55,7 +53,6 @@ impl Stream for CopyOutStream { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); - println!("c"); match ready!(this.responses.poll_next(cx)?) 
{ Message::CopyData(body) => Poll::Ready(Some(Ok(body.into_bytes()))), Message::CopyDone => Poll::Ready(None), From 2093f3e1ee4e85e5237ef117d1abe79663dbcc17 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 25 Dec 2019 08:19:04 -0800 Subject: [PATCH 358/819] Release tokio-postgres v0.5.1 --- tokio-postgres/CHANGELOG.md | 6 ++++++ tokio-postgres/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index a261b7d1d..95bf19bc7 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.1 - 2019-12-25 + +### Fixed + +* Removed some stray `println!`s from `copy_out` internals. + ## v0.5.0 - 2019-12-23 ### Changed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 5e10aee59..ec43f1aa2 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.5.0" +version = "0.5.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 793e83a4ef05f3710c780bb2c08f1ac556ae8a46 Mon Sep 17 00:00:00 2001 From: Colin Maxfield Date: Sat, 28 Dec 2019 16:32:01 -0500 Subject: [PATCH 359/819] Adding in generic for the query function of genericconnection --- postgres/src/client.rs | 5 ++++- postgres/src/generic_connection.rs | 6 ++++-- postgres/src/transaction.rs | 5 ++++- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index b24a568ab..e513e51e6 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -458,7 +458,10 @@ impl GenericConnection for Client { fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result { self.execute(query, params) } - fn query(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result, Error> { + fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> + where + T: ?Sized + ToStatement, + { self.query(query, params) } fn prepare(&mut self, query: &str) -> Result { diff --git a/postgres/src/generic_connection.rs b/postgres/src/generic_connection.rs index 47df4c20a..a25b283b6 100644 --- a/postgres/src/generic_connection.rs +++ b/postgres/src/generic_connection.rs @@ -1,4 +1,4 @@ -use crate::{Statement, Transaction}; +use crate::{Statement, ToStatement, Transaction}; use tokio_postgres::types::ToSql; use tokio_postgres::{Error, Row}; @@ -8,7 +8,9 @@ pub trait GenericConnection { fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result; /// Like `Client::query`. - fn query(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result, Error>; + fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> + where + T: ?Sized + ToStatement; /// Like `Client::prepare`. 
fn prepare(&mut self, query: &str) -> Result; diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index c5639387d..de49489a7 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -184,7 +184,10 @@ impl<'a> GenericConnection for Transaction<'a> { fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result { self.execute(query, params) } - fn query(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result, Error> { + fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> + where + T: ?Sized + ToStatement, + { self.query(query, params) } fn prepare(&mut self, query: &str) -> Result { From 508b43075397a38b45ff2a7c9578f14e8e4dad30 Mon Sep 17 00:00:00 2001 From: Colin Maxfield Date: Sat, 28 Dec 2019 16:36:56 -0500 Subject: [PATCH 360/819] Adding generic for execute function for genericconnection --- postgres/src/client.rs | 5 ++++- postgres/src/generic_connection.rs | 4 +++- postgres/src/transaction.rs | 5 ++++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index e513e51e6..e9a03cebe 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -455,7 +455,10 @@ impl Client { } impl GenericConnection for Client { - fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result { + fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement, + { self.execute(query, params) } fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> diff --git a/postgres/src/generic_connection.rs b/postgres/src/generic_connection.rs index a25b283b6..3a8e67883 100644 --- a/postgres/src/generic_connection.rs +++ b/postgres/src/generic_connection.rs @@ -5,7 +5,9 @@ use tokio_postgres::{Error, Row}; /// A trait allowing abstraction over connections and transactions pub trait GenericConnection { /// Like `Client::execute`. - fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result; + fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement; /// Like `Client::query`. 
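Together, [PATCH 359/819] and [PATCH 360/819] make `GenericConnection::query` and `GenericConnection::execute` generic over `ToStatement`, so callers can pass either a raw SQL string or an already-prepared `Statement`. The helper below is an illustrative sketch of what this enables; it is not part of either patch, and `foo` is a hypothetical table:

```rust
use postgres::{GenericConnection, Statement};

// Works with any GenericConnection implementor (Client or Transaction).
fn count_foo<C: GenericConnection>(conn: &mut C) -> Result<i64, postgres::Error> {
    // A &str is accepted because `str` implements `ToStatement`...
    let rows = conn.query("SELECT count(*) FROM foo", &[])?;
    let n: i64 = rows[0].get(0);

    // ...and so is an explicitly prepared `Statement`.
    let stmt: Statement = conn.prepare("SELECT count(*) FROM foo")?;
    let rows = conn.query(&stmt, &[])?;
    assert_eq!(n, rows[0].get::<_, i64>(0));

    Ok(n)
}
```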
fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index de49489a7..98b6bff51 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -181,7 +181,10 @@ impl<'a> Transaction<'a> { } impl<'a> GenericConnection for Transaction<'a> { - fn execute(&mut self, query: &str, params: &[&(dyn ToSql + Sync)]) -> Result { + fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement, + { self.execute(query, params) } fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> From 33dbbcbee7072954609ee5a0eb7924fdf9522624 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 25 Dec 2019 18:35:40 -0500 Subject: [PATCH 361/819] Support cancellation in synchronous client --- postgres/src/cancel_token.rs | 36 ++++++++++++++++ postgres/src/client.rs | 44 ++++++++++++++++++- postgres/src/lib.rs | 2 + postgres/src/test.rs | 21 ++++++++++ postgres/src/transaction.rs | 9 +++- tokio-postgres/src/cancel_token.rs | 63 ++++++++++++++++++++++++++++ tokio-postgres/src/client.rs | 37 ++++++++-------- tokio-postgres/src/lib.rs | 2 + tokio-postgres/src/transaction.rs | 13 +++++- tokio-postgres/tests/test/main.rs | 3 +- tokio-postgres/tests/test/runtime.rs | 3 +- 11 files changed, 208 insertions(+), 25 deletions(-) create mode 100644 postgres/src/cancel_token.rs create mode 100644 tokio-postgres/src/cancel_token.rs diff --git a/postgres/src/cancel_token.rs b/postgres/src/cancel_token.rs new file mode 100644 index 000000000..f140e60e2 --- /dev/null +++ b/postgres/src/cancel_token.rs @@ -0,0 +1,36 @@ +use tokio::runtime; +use tokio_postgres::tls::MakeTlsConnect; +use tokio_postgres::{Error, Socket}; + +/// The capability to request cancellation of in-progress queries on a +/// connection. +#[derive(Clone)] +pub struct CancelToken(tokio_postgres::CancelToken); + +impl CancelToken { + pub(crate) fn new(inner: tokio_postgres::CancelToken) -> CancelToken { + CancelToken(inner) + } + + /// Attempts to cancel the in-progress query on the connection associated + /// with this `CancelToken`. + /// + /// The server provides no information about whether a cancellation attempt was successful or not. An error will + /// only be returned if the client was unable to connect to the database. + /// + /// Cancellation is inherently racy. There is no guarantee that the + /// cancellation request will reach the server before the query terminates + /// normally, or that the connection associated with this token is still + /// active. 
+ pub fn cancel_query(&self, tls: T) -> Result<(), Error> + where + T: MakeTlsConnect, + { + runtime::Builder::new() + .enable_all() + .basic_scheduler() + .build() + .unwrap() // FIXME don't unwrap + .block_on(self.0.cancel_query(tls)) + } +} diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 3e69670dc..dd3495a19 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,4 +1,6 @@ -use crate::{Config, CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction}; +use crate::{ + CancelToken, Config, CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction, +}; use std::ops::{Deref, DerefMut}; use tokio::runtime::Runtime; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; @@ -443,6 +445,46 @@ impl Client { Ok(Transaction::new(&mut self.runtime, transaction)) } + /// Constructs a cancellation token that can later be used to request + /// cancellation of a query running on this connection. + /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, NoTls}; + /// use postgres::error::SqlState; + /// use std::thread; + /// use std::time::Duration; + /// + /// # fn main() -> Result<(), Box> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let cancel_token = client.cancel_token(); + /// + /// thread::spawn(move || { + /// // Abort the query after 5s. + /// thread::sleep(Duration::from_secs(5)); + /// cancel_token.cancel_query(NoTls); + /// }); + /// + /// match client.simple_query("SELECT long_running_query()") { + /// Err(e) if e.code() == Some(&SqlState::QUERY_CANCELED) => { + /// // Handle canceled query. + /// } + /// Err(err) => return Err(err.into()), + /// Ok(rows) => { + /// // ... + /// } + /// } + /// // ... + /// + /// # Ok(()) + /// # } + /// ``` + pub fn cancel_token(&self) -> CancelToken { + CancelToken::new(self.client.cancel_token()) + } + /// Determines if the client's connection has already closed. /// /// If this returns `true`, the client is no longer usable. 
diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 42b28f315..5399bfcee 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -54,6 +54,7 @@ pub use tokio_postgres::{ error, row, tls, types, Column, Portal, SimpleQueryMessage, Socket, Statement, ToStatement, }; +pub use crate::cancel_token::CancelToken; pub use crate::client::*; pub use crate::config::Config; pub use crate::copy_in_writer::CopyInWriter; @@ -68,6 +69,7 @@ pub use crate::tls::NoTls; pub use crate::transaction::*; pub mod binary_copy; +mod cancel_token; mod client; pub mod config; mod copy_in_writer; diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 2275d715a..449aac012 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,4 +1,7 @@ use std::io::{Read, Write}; +use std::thread; +use std::time::Duration; +use tokio_postgres::error::SqlState; use tokio_postgres::types::Type; use tokio_postgres::NoTls; @@ -288,3 +291,21 @@ fn portal() { assert_eq!(rows.len(), 1); assert_eq!(rows[0].get::<_, i32>(0), 3); } + +#[test] +fn cancel_query() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + let cancel_token = client.cancel_token(); + let cancel_thread = thread::spawn(move || { + thread::sleep(Duration::from_millis(100)); + cancel_token.cancel_query(NoTls).unwrap(); + }); + + match client.batch_execute("SELECT pg_sleep(100)") { + Err(e) if e.code() == Some(&SqlState::QUERY_CANCELED) => {} + t => panic!("unexpected return: {:?}", t), + } + + cancel_thread.join().unwrap(); +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 010e8b62e..e5b3682f0 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,4 +1,6 @@ -use crate::{CopyInWriter, CopyOutReader, Portal, RowIter, Rt, Statement, ToStatement}; +use crate::{ + CancelToken, CopyInWriter, CopyOutReader, Portal, RowIter, Rt, Statement, ToStatement, +}; use tokio::runtime::Runtime; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; @@ -168,6 +170,11 @@ impl<'a> Transaction<'a> { self.runtime.block_on(self.transaction.batch_execute(query)) } + /// Like `Client::cancel_token`. + pub fn cancel_token(&self) -> CancelToken { + CancelToken::new(self.transaction.cancel_token()) + } + /// Like `Client::transaction`. pub fn transaction(&mut self) -> Result, Error> { let transaction = self.runtime.block_on(self.transaction.transaction())?; diff --git a/tokio-postgres/src/cancel_token.rs b/tokio-postgres/src/cancel_token.rs new file mode 100644 index 000000000..d048a3c82 --- /dev/null +++ b/tokio-postgres/src/cancel_token.rs @@ -0,0 +1,63 @@ +use crate::config::SslMode; +use crate::tls::TlsConnect; +#[cfg(feature = "runtime")] +use crate::{cancel_query, client::SocketConfig, tls::MakeTlsConnect, Socket}; +use crate::{cancel_query_raw, Error}; +use tokio::io::{AsyncRead, AsyncWrite}; + +/// The capability to request cancellation of in-progress queries on a +/// connection. +#[derive(Clone)] +pub struct CancelToken { + #[cfg(feature = "runtime")] + pub(crate) socket_config: Option, + pub(crate) ssl_mode: SslMode, + pub(crate) process_id: i32, + pub(crate) secret_key: i32, +} + +impl CancelToken { + /// Attempts to cancel the in-progress query on the connection associated + /// with this `CancelToken`. + /// + /// The server provides no information about whether a cancellation attempt was successful or not. An error will + /// only be returned if the client was unable to connect to the database. 
+ /// + /// Cancellation is inherently racy. There is no guarantee that the + /// cancellation request will reach the server before the query terminates + /// normally, or that the connection associated with this token is still + /// active. + /// + /// Requires the `runtime` Cargo feature (enabled by default). + #[cfg(feature = "runtime")] + pub async fn cancel_query(&self, tls: T) -> Result<(), Error> + where + T: MakeTlsConnect, + { + cancel_query::cancel_query( + self.socket_config.clone(), + self.ssl_mode, + tls, + self.process_id, + self.secret_key, + ) + .await + } + + /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new + /// connection itself. + pub async fn cancel_query_raw(&self, stream: S, tls: T) -> Result<(), Error> + where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsConnect, + { + cancel_query_raw::cancel_query_raw( + stream, + self.ssl_mode, + tls, + self.process_id, + self.secret_key, + ) + .await + } +} diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 68ccaf371..d9b4a311b 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,5 +1,3 @@ -#[cfg(feature = "runtime")] -use crate::cancel_query; use crate::codec::BackendMessages; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; @@ -14,7 +12,7 @@ use crate::to_statement::ToStatement; use crate::types::{Oid, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; -use crate::{cancel_query_raw, copy_in, copy_out, query, CopyInSink, Transaction}; +use crate::{copy_in, copy_out, query, CancelToken, CopyInSink, Transaction}; use crate::{prepare, SimpleQueryMessage}; use crate::{simple_query, Row}; use crate::{Error, Statement}; @@ -451,6 +449,19 @@ impl Client { Ok(Transaction::new(self)) } + /// Constructs a cancellation token that can later be used to request + /// cancellation of a query running on the connection associated with + /// this client. + pub fn cancel_token(&self) -> CancelToken { + CancelToken { + #[cfg(feature = "runtime")] + socket_config: self.socket_config.clone(), + ssl_mode: self.ssl_mode, + process_id: self.process_id, + secret_key: self.secret_key, + } + } + /// Attempts to cancel an in-progress query. /// /// The server provides no information about whether a cancellation attempt was successful or not. An error will @@ -458,35 +469,23 @@ impl Client { /// /// Requires the `runtime` Cargo feature (enabled by default). #[cfg(feature = "runtime")] + #[deprecated(since = "0.6.0", note = "use Client::cancel_token() instead")] pub async fn cancel_query(&self, tls: T) -> Result<(), Error> where T: MakeTlsConnect, { - cancel_query::cancel_query( - self.socket_config.clone(), - self.ssl_mode, - tls, - self.process_id, - self.secret_key, - ) - .await + self.cancel_token().cancel_query(tls).await } /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new /// connection itself. + #[deprecated(since = "0.6.0", note = "use Client::cancel_token() instead")] pub async fn cancel_query_raw(&self, stream: S, tls: T) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - cancel_query_raw::cancel_query_raw( - stream, - self.ssl_mode, - tls, - self.process_id, - self.secret_key, - ) - .await + self.cancel_token().cancel_query_raw(stream, tls).await } /// Determines if the connection to the server has already closed. 
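On the async side, the new `CancelToken` is a plain value that can be obtained from a `Client` (or `Transaction`) and moved into another task, mirroring what the test suite does. A minimal sketch of that pattern, assuming a tokio 0.2 runtime, a local server, and no TLS; it is illustrative only and not part of the patch:

```rust
use std::time::Duration;
use tokio_postgres::error::SqlState;
use tokio_postgres::NoTls;

async fn run() -> Result<(), Box<dyn std::error::Error>> {
    let (client, connection) =
        tokio_postgres::connect("host=localhost user=postgres", NoTls).await?;
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("connection error: {}", e);
        }
    });

    // The token owns its data, so it can outlive any borrow of the client.
    let cancel_token = client.cancel_token();
    tokio::spawn(async move {
        tokio::time::delay_for(Duration::from_millis(100)).await;
        // The server reports nothing back; an error here only means the
        // cancel request itself could not be delivered.
        let _ = cancel_token.cancel_query(NoTls).await;
    });

    match client.batch_execute("SELECT pg_sleep(100)").await {
        // The long-running query was cancelled, as requested.
        Err(e) if e.code() == Some(&SqlState::QUERY_CANCELED) => Ok(()),
        other => other.map_err(Into::into),
    }
}
```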
diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index dd0d8aba6..ff87baf03 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -99,6 +99,7 @@ #![doc(html_root_url = "https://docs.rs/tokio-postgres/0.5")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] +pub use crate::cancel_token::CancelToken; pub use crate::client::Client; pub use crate::config::Config; pub use crate::connection::Connection; @@ -125,6 +126,7 @@ mod bind; #[cfg(feature = "runtime")] mod cancel_query; mod cancel_query_raw; +mod cancel_token; mod client; mod codec; pub mod config; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index c97d5f9f4..f5a396841 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -9,8 +9,8 @@ use crate::types::{ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; use crate::{ - bind, query, slice_iter, Client, CopyInSink, Error, Portal, Row, SimpleQueryMessage, Statement, - ToStatement, + bind, query, slice_iter, CancelToken, Client, CopyInSink, Error, Portal, Row, + SimpleQueryMessage, Statement, ToStatement, }; use bytes::Buf; use futures::TryStreamExt; @@ -249,21 +249,30 @@ impl<'a> Transaction<'a> { self.client.batch_execute(query).await } + /// Like `Client::cancel_token`. + pub fn cancel_token(&self) -> CancelToken { + self.client.cancel_token() + } + /// Like `Client::cancel_query`. #[cfg(feature = "runtime")] + #[deprecated(since = "0.6.0", note = "use Transaction::cancel_token() instead")] pub async fn cancel_query(&self, tls: T) -> Result<(), Error> where T: MakeTlsConnect, { + #[allow(deprecated)] self.client.cancel_query(tls).await } /// Like `Client::cancel_query_raw`. + #[deprecated(since = "0.6.0", note = "use Transaction::cancel_token() instead")] pub async fn cancel_query_raw(&self, stream: S, tls: T) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { + #[allow(deprecated)] self.client.cancel_query_raw(stream, tls).await } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 231749526..37ee03516 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -304,7 +304,8 @@ async fn cancel_query_raw() { let client = connect("user=postgres").await; let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); - let cancel = client.cancel_query_raw(socket, NoTls); + let cancel_token = client.cancel_token(); + let cancel = cancel_token.cancel_query_raw(socket, NoTls); let cancel = time::delay_for(Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 35219d8ad..e07aa4a63 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -70,7 +70,8 @@ async fn target_session_attrs_err() { async fn cancel_query() { let client = connect("host=localhost port=5433 user=postgres").await; - let cancel = client.cancel_query(NoTls); + let cancel_token = client.cancel_token(); + let cancel = cancel_token.cancel_query(NoTls); let cancel = time::delay_for(Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); From b73ec018b77585f0bbefbc3b1c125b368b419206 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Sun, 29 Dec 2019 14:53:41 -0500 Subject: [PATCH 362/819] Expose accessors on Config object Fix #534. 
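A short sketch of what #534 asks for: connection parameters are no longer write-only and can be read back from a parsed `Config`. The keyword string and the `describe` helper below are illustrative only and not part of the patch:

```rust
use postgres::config::{Config, Host};

fn describe(config: &Config) {
    // All of these getters are introduced by this patch.
    println!("user:   {:?}", config.get_user());
    println!("dbname: {:?}", config.get_dbname());
    println!("ssl:    {:?}", config.get_ssl_mode());
    for host in config.get_hosts() {
        match host {
            Host::Tcp(name) => println!("tcp host: {}", name),
            #[cfg(unix)]
            Host::Unix(path) => println!("unix socket dir: {}", path.display()),
        }
    }
}

fn main() -> Result<(), postgres::Error> {
    let config: Config = "host=localhost user=postgres dbname=app".parse()?;
    describe(&config);
    Ok(())
}
```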
--- postgres/src/config.rs | 74 ++++++++++++++++++++++++++++++++- tokio-postgres/src/config.rs | 79 +++++++++++++++++++++++++++++++++++- 2 files changed, 151 insertions(+), 2 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index d50bd024d..f6b151a8e 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -11,7 +11,7 @@ use std::str::FromStr; use std::time::Duration; use tokio::runtime; #[doc(inline)] -pub use tokio_postgres::config::{ChannelBinding, SslMode, TargetSessionAttrs}; +pub use tokio_postgres::config::{ChannelBinding, Host, SslMode, TargetSessionAttrs}; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; @@ -123,6 +123,12 @@ impl Config { self } + /// Gets the user to authenticate with, if one has been configured with + /// the `user` method. + pub fn get_user(&self) -> Option<&str> { + self.config.get_user() + } + /// Sets the password to authenticate with. pub fn password(&mut self, password: T) -> &mut Config where @@ -132,6 +138,12 @@ impl Config { self } + /// Gets the password to authenticate with, if one has been configured with + /// the `password` method. + pub fn get_password(&self) -> Option<&[u8]> { + self.config.get_password() + } + /// Sets the name of the database to connect to. /// /// Defaults to the user. @@ -140,18 +152,36 @@ impl Config { self } + /// Gets the name of the database to connect to, if one has been configured + /// with the `dbname` method. + pub fn get_dbname(&self) -> Option<&str> { + self.config.get_dbname() + } + /// Sets command line options used to configure the server. pub fn options(&mut self, options: &str) -> &mut Config { self.config.options(options); self } + /// Gets the command line options used to configure the server, if the + /// options have been set with the `options` method. + pub fn get_options(&self) -> Option<&str> { + self.config.get_options() + } + /// Sets the value of the `application_name` runtime parameter. pub fn application_name(&mut self, application_name: &str) -> &mut Config { self.config.application_name(application_name); self } + /// Gets the value of the `application_name` runtime parameter, if it has + /// been set with the `application_name` method. + pub fn get_application_name(&self) -> Option<&str> { + self.config.get_application_name() + } + /// Sets the SSL configuration. /// /// Defaults to `prefer`. @@ -160,6 +190,11 @@ impl Config { self } + /// Gets the SSL configuration. + pub fn get_ssl_mode(&self) -> SslMode { + self.config.get_ssl_mode() + } + /// Adds a host to the configuration. /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix @@ -169,6 +204,11 @@ impl Config { self } + /// Gets the hosts that have been added to the configuration with `host`. + pub fn get_hosts(&self) -> &[Host] { + self.config.get_hosts() + } + /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. @@ -191,6 +231,11 @@ impl Config { self } + /// Gets the ports that have been added to the configuration with `port`. + pub fn get_ports(&self) -> &[u16] { + self.config.get_ports() + } + /// Sets the timeout applied to socket-level connection attempts. /// /// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each @@ -200,6 +245,12 @@ impl Config { self } + /// Gets the connection timeout, if one has been set with the + /// `connect_timeout` method. 
+ pub fn get_connect_timeout(&self) -> Option<&Duration> { + self.config.get_connect_timeout() + } + /// Controls the use of TCP keepalive. /// /// This is ignored for Unix domain socket connections. Defaults to `true`. @@ -208,6 +259,11 @@ impl Config { self } + /// Reports whether TCP keepalives will be used. + pub fn get_keepalives(&self) -> bool { + self.config.get_keepalives() + } + /// Sets the amount of idle time before a keepalive packet is sent on the connection. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. @@ -216,6 +272,12 @@ impl Config { self } + /// Gets the configured amount of idle time before a keepalive packet will + /// be sent on the connection. + pub fn get_keepalives_idle(&self) -> Duration { + self.config.get_keepalives_idle() + } + /// Sets the requirements of the session. /// /// This can be used to connect to the primary server in a clustered database rather than one of the read-only @@ -228,6 +290,11 @@ impl Config { self } + /// Gets the requirements of the session. + pub fn get_target_session_attrs(&self) -> TargetSessionAttrs { + self.config.get_target_session_attrs() + } + /// Sets the channel binding behavior. /// /// Defaults to `prefer`. @@ -236,6 +303,11 @@ impl Config { self } + /// Gets the channel binding behavior. + pub fn get_channel_binding(&self) -> ChannelBinding { + self.config.get_channel_binding() + } + /// Opens a connection to a PostgreSQL database. pub fn connect(&self, tls: T) -> Result where diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 0cba8ec8d..da171cc79 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -56,9 +56,14 @@ pub enum ChannelBinding { Require, } +/// A host specification. #[derive(Debug, Clone, PartialEq)] -pub(crate) enum Host { +pub enum Host { + /// A TCP hostname. Tcp(String), + /// A path to a directory containing the server's Unix socket. + /// + /// This variant is only available on Unix platforms. #[cfg(unix)] Unix(PathBuf), } @@ -190,6 +195,12 @@ impl Config { self } + /// Gets the user to authenticate with, if one has been configured with + /// the `user` method. + pub fn get_user(&self) -> Option<&str> { + self.user.as_deref() + } + /// Sets the password to authenticate with. pub fn password(&mut self, password: T) -> &mut Config where @@ -199,6 +210,12 @@ impl Config { self } + /// Gets the password to authenticate with, if one has been configured with + /// the `password` method. + pub fn get_password(&self) -> Option<&[u8]> { + self.password.as_deref() + } + /// Sets the name of the database to connect to. /// /// Defaults to the user. @@ -207,18 +224,36 @@ impl Config { self } + /// Gets the name of the database to connect to, if one has been configured + /// with the `dbname` method. + pub fn get_dbname(&self) -> Option<&str> { + self.dbname.as_deref() + } + /// Sets command line options used to configure the server. pub fn options(&mut self, options: &str) -> &mut Config { self.options = Some(options.to_string()); self } + /// Gets the command line options used to configure the server, if the + /// options have been set with the `options` method. + pub fn get_options(&self) -> Option<&str> { + self.options.as_deref() + } + /// Sets the value of the `application_name` runtime parameter. 
pub fn application_name(&mut self, application_name: &str) -> &mut Config { self.application_name = Some(application_name.to_string()); self } + /// Gets the value of the `application_name` runtime parameter, if it has + /// been set with the `application_name` method. + pub fn get_application_name(&self) -> Option<&str> { + self.application_name.as_deref() + } + /// Sets the SSL configuration. /// /// Defaults to `prefer`. @@ -227,6 +262,11 @@ impl Config { self } + /// Gets the SSL configuration. + pub fn get_ssl_mode(&self) -> SslMode { + self.ssl_mode + } + /// Adds a host to the configuration. /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix @@ -243,6 +283,11 @@ impl Config { self } + /// Gets the hosts that have been added to the configuration with `host`. + pub fn get_hosts(&self) -> &[Host] { + &self.host + } + /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. @@ -265,6 +310,11 @@ impl Config { self } + /// Gets the ports that have been added to the configuration with `port`. + pub fn get_ports(&self) -> &[u16] { + &self.port + } + /// Sets the timeout applied to socket-level connection attempts. /// /// Note that hostnames can resolve to multiple IP addresses, and this timeout will apply to each address of each @@ -274,6 +324,12 @@ impl Config { self } + /// Gets the connection timeout, if one has been set with the + /// `connect_timeout` method. + pub fn get_connect_timeout(&self) -> Option<&Duration> { + self.connect_timeout.as_ref() + } + /// Controls the use of TCP keepalive. /// /// This is ignored for Unix domain socket connections. Defaults to `true`. @@ -282,6 +338,11 @@ impl Config { self } + /// Reports whether TCP keepalives will be used. + pub fn get_keepalives(&self) -> bool { + self.keepalives + } + /// Sets the amount of idle time before a keepalive packet is sent on the connection. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. @@ -290,6 +351,12 @@ impl Config { self } + /// Gets the configured amount of idle time before a keepalive packet will + /// be sent on the connection. + pub fn get_keepalives_idle(&self) -> Duration { + self.keepalives_idle + } + /// Sets the requirements of the session. /// /// This can be used to connect to the primary server in a clustered database rather than one of the read-only @@ -302,6 +369,11 @@ impl Config { self } + /// Gets the requirements of the session. + pub fn get_target_session_attrs(&self) -> TargetSessionAttrs { + self.target_session_attrs + } + /// Sets the channel binding behavior. /// /// Defaults to `prefer`. @@ -310,6 +382,11 @@ impl Config { self } + /// Gets the channel binding behavior. 
+ pub fn get_channel_binding(&self) -> ChannelBinding { + self.channel_binding + } + fn param(&mut self, key: &str, value: &str) -> Result<(), Error> { match key { "user" => { From 8a3edc97dea64c136eb47151d26f0aebf317465a Mon Sep 17 00:00:00 2001 From: Christofer Nolander Date: Sun, 29 Dec 2019 23:05:45 +0100 Subject: [PATCH 363/819] Put unused type parameter back into use --- tokio-postgres/src/transaction.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index c97d5f9f4..d543bc898 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -155,11 +155,7 @@ impl<'a> Transaction<'a> { } /// Like `Client::execute_iter`. - pub async fn execute_raw<'b, I, T>( - &self, - statement: &Statement, - params: I, - ) -> Result + pub async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, I: IntoIterator, From 850512be8b0d93c66a4f053abbc56be398b3f043 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 29 Dec 2019 18:42:35 -0800 Subject: [PATCH 364/819] Remove old dev dependency --- tokio-postgres/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index ec43f1aa2..4c16758fe 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -60,6 +60,5 @@ eui48-04 = { version = "0.4", package = "eui48" } geo-types-04 = { version = "0.4", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } -uuid-07 = { version = "0.7", package = "uuid" } uuid-08 = { version = "0.8", package = "uuid" } From 66194766d06cff4523336fe0a190390cd8c5b706 Mon Sep 17 00:00:00 2001 From: Lachezar Lechev <8925621+elpiel@users.noreply.github.com> Date: Thu, 2 Jan 2020 14:08:15 +0200 Subject: [PATCH 365/819] Fix repository link --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index ae45eb63d..4ea70f0ae 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" license = "MIT/Apache-2.0" -repository = "https://github.com/sfackler/rust-postgres-protocol" +repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" [dependencies] From a865207ff6145121f2921e4e9369fff4b7331797 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 2 Jan 2020 20:44:07 -0500 Subject: [PATCH 366/819] Rename to GenericClient --- postgres/src/client.rs | 7 +++++-- postgres/src/{generic_connection.rs => generic_client.rs} | 2 +- postgres/src/lib.rs | 4 ++-- postgres/src/transaction.rs | 7 +++++-- 4 files changed, 13 insertions(+), 7 deletions(-) rename postgres/src/{generic_connection.rs => generic_client.rs} (96%) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index c9ff48071..e34997928 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,5 +1,5 @@ use crate::{ - CancelToken, Config, CopyInWriter, CopyOutReader, GenericConnection, RowIter, Statement, + CancelToken, Config, CopyInWriter, CopyOutReader, GenericClient, RowIter, Statement, ToStatement, Transaction, }; use std::ops::{Deref, DerefMut}; @@ -494,22 +494,25 @@ impl Client { } } -impl GenericConnection for Client { +impl GenericClient for Client { fn execute(&mut self, query: &T, params: 
&[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement, { self.execute(query, params) } + fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement, { self.query(query, params) } + fn prepare(&mut self, query: &str) -> Result { self.prepare(query) } + fn transaction(&mut self) -> Result, Error> { self.transaction() } diff --git a/postgres/src/generic_connection.rs b/postgres/src/generic_client.rs similarity index 96% rename from postgres/src/generic_connection.rs rename to postgres/src/generic_client.rs index 3a8e67883..354c6bb0a 100644 --- a/postgres/src/generic_connection.rs +++ b/postgres/src/generic_client.rs @@ -3,7 +3,7 @@ use tokio_postgres::types::ToSql; use tokio_postgres::{Error, Row}; /// A trait allowing abstraction over connections and transactions -pub trait GenericConnection { +pub trait GenericClient { /// Like `Client::execute`. fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 2a9da9625..1bc8a8834 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -61,7 +61,7 @@ pub use crate::copy_in_writer::CopyInWriter; pub use crate::copy_out_reader::CopyOutReader; #[doc(no_inline)] pub use crate::error::Error; -pub use crate::generic_connection::GenericConnection; +pub use crate::generic_client::GenericClient; #[doc(no_inline)] pub use crate::row::{Row, SimpleQueryRow}; pub use crate::row_iter::RowIter; @@ -75,7 +75,7 @@ mod client; pub mod config; mod copy_in_writer; mod copy_out_reader; -mod generic_connection; +mod generic_client; mod lazy_pin; mod row_iter; mod transaction; diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 2ed0ae332..c1fdcf06b 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,5 +1,5 @@ use crate::{ - CancelToken, CopyInWriter, CopyOutReader, GenericConnection, Portal, RowIter, Rt, Statement, + CancelToken, CopyInWriter, CopyOutReader, GenericClient, Portal, RowIter, Rt, Statement, ToStatement, }; use tokio::runtime::Runtime; @@ -186,22 +186,25 @@ impl<'a> Transaction<'a> { } } -impl<'a> GenericConnection for Transaction<'a> { +impl<'a> GenericClient for Transaction<'a> { fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement, { self.execute(query, params) } + fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement, { self.query(query, params) } + fn prepare(&mut self, query: &str) -> Result { self.prepare(query) } + fn transaction(&mut self) -> Result, Error> { self.transaction() } From 3ba416ded6d0f5b9a41e980a1b090828eac7be9b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 2 Jan 2020 20:58:38 -0500 Subject: [PATCH 367/819] Add tokio_postgres::GenericClient We have to make the trait methods differ from the normal methods a bit by adding Sync + Send bounds to the ToStatement parameter which is a bit unfortunate, but necessary until GATs async_trait unnecessary. 
Closes #357 --- postgres/src/generic_client.rs | 7 +++-- tokio-postgres/Cargo.toml | 1 + tokio-postgres/src/client.rs | 40 +++++++++++++++++++++++----- tokio-postgres/src/generic_client.rs | 27 +++++++++++++++++++ tokio-postgres/src/lib.rs | 2 ++ tokio-postgres/src/transaction.rs | 30 +++++++++++++++++++++ 6 files changed, 97 insertions(+), 10 deletions(-) create mode 100644 tokio-postgres/src/generic_client.rs diff --git a/postgres/src/generic_client.rs b/postgres/src/generic_client.rs index 354c6bb0a..b586d0d4d 100644 --- a/postgres/src/generic_client.rs +++ b/postgres/src/generic_client.rs @@ -1,8 +1,7 @@ -use crate::{Statement, ToStatement, Transaction}; -use tokio_postgres::types::ToSql; -use tokio_postgres::{Error, Row}; +use crate::types::ToSql; +use crate::{Error, Row, Statement, ToStatement, Transaction}; -/// A trait allowing abstraction over connections and transactions +/// A trait allowing abstraction over connections and transactions. pub trait GenericClient { /// Like `Client::execute`. fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 4c16758fe..dc2c140ae 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -35,6 +35,7 @@ with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] [dependencies] +async-trait = "0.1" bytes = "0.5" byteorder = "1.0" fallible-iterator = "0.2" diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index d9b4a311b..50cca9ddd 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -4,18 +4,17 @@ use crate::connection::{Request, RequestMessages}; use crate::copy_out::CopyOutStream; use crate::query::RowStream; use crate::simple_query::SimpleQueryStream; -use crate::slice_iter; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; use crate::tls::TlsConnect; -use crate::to_statement::ToStatement; use crate::types::{Oid, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; -use crate::{copy_in, copy_out, query, CancelToken, CopyInSink, Transaction}; -use crate::{prepare, SimpleQueryMessage}; -use crate::{simple_query, Row}; -use crate::{Error, Statement}; +use crate::{ + copy_in, copy_out, prepare, query, simple_query, slice_iter, CancelToken, CopyInSink, Error, + GenericClient, Row, SimpleQueryMessage, Statement, ToStatement, Transaction, +}; +use async_trait::async_trait; use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; @@ -495,3 +494,32 @@ impl Client { self.inner.sender.is_closed() } } + +#[async_trait] +impl GenericClient for Client { + async fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + { + self.execute(query, params).await + } + + async fn query( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query(query, params).await + } + + async fn prepare(&mut self, query: &str) -> Result { + self.prepare(query).await + } + + async fn transaction(&mut self) -> Result, Error> { + self.transaction().await + } +} diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs new file mode 100644 index 000000000..949ad4f06 --- /dev/null +++ b/tokio-postgres/src/generic_client.rs @@ -0,0 +1,27 @@ +use crate::types::ToSql; +use crate::{Error, Row, Statement, ToStatement, Transaction}; +use 
async_trait::async_trait; + +/// A trait allowing abstraction over connections and transactions. +#[async_trait] +pub trait GenericClient { + /// Like `Client::execute`. + async fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement + Sync + Send; + + /// Like `Client::query`. + async fn query( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send; + + /// Like `Client::prepare`. + async fn prepare(&mut self, query: &str) -> Result; + + /// Like `Client::transaction`. + async fn transaction(&mut self) -> Result, Error>; +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index ff87baf03..984284248 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -107,6 +107,7 @@ pub use crate::copy_in::CopyInSink; pub use crate::copy_out::CopyOutStream; use crate::error::DbError; pub use crate::error::Error; +pub use crate::generic_client::GenericClient; pub use crate::portal::Portal; pub use crate::query::RowStream; pub use crate::row::{Row, SimpleQueryRow}; @@ -140,6 +141,7 @@ mod connection; mod copy_in; mod copy_out; pub mod error; +mod generic_client; mod maybe_tls_stream; mod portal; mod prepare; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index a1ee4f6cb..4fa25a7aa 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -12,6 +12,7 @@ use crate::{ bind, query, slice_iter, CancelToken, Client, CopyInSink, Error, Portal, Row, SimpleQueryMessage, Statement, ToStatement, }; +use async_trait::async_trait; use bytes::Buf; use futures::TryStreamExt; use postgres_protocol::message::frontend; @@ -285,3 +286,32 @@ impl<'a> Transaction<'a> { }) } } + +#[async_trait] +impl crate::GenericClient for Transaction<'_> { + async fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + { + self.execute(query, params).await + } + + async fn query( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query(query, params).await + } + + async fn prepare(&mut self, query: &str) -> Result { + self.prepare(query).await + } + + async fn transaction<'a>(&'a mut self) -> Result, Error> { + self.transaction().await + } +} From 24d4b9233666c1b9517da5ff9f027b1c1f851396 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 2 Jan 2020 21:05:38 -0500 Subject: [PATCH 368/819] Whitelist clippy lint async_trait requires this to be explicit --- tokio-postgres/src/transaction.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 4fa25a7aa..75a939302 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -311,6 +311,7 @@ impl crate::GenericClient for Transaction<'_> { self.prepare(query).await } + #[allow(clippy::needless_lifetimes)] async fn transaction<'a>(&'a mut self) -> Result, Error> { self.transaction().await } From 214413d9dc03e892364bc043bae722d57edb3259 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 8 Jan 2020 17:19:27 -0800 Subject: [PATCH 369/819] Add transaction builders Closes #543 --- postgres/src/client.rs | 29 +++++- postgres/src/lib.rs | 5 +- postgres/src/transaction_builder.rs | 47 ++++++++++ tokio-postgres/src/client.rs | 9 ++ tokio-postgres/src/lib.rs | 2 + tokio-postgres/src/transaction_builder.rs | 
103 ++++++++++++++++++++++ tokio-postgres/tests/test/main.rs | 39 +++++++- 7 files changed, 231 insertions(+), 3 deletions(-) create mode 100644 postgres/src/transaction_builder.rs create mode 100644 tokio-postgres/src/transaction_builder.rs diff --git a/postgres/src/client.rs b/postgres/src/client.rs index e34997928..ec973229d 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,6 +1,6 @@ use crate::{ CancelToken, Config, CopyInWriter, CopyOutReader, GenericClient, RowIter, Statement, - ToStatement, Transaction, + ToStatement, Transaction, TransactionBuilder, }; use std::ops::{Deref, DerefMut}; use tokio::runtime::Runtime; @@ -486,6 +486,33 @@ impl Client { CancelToken::new(self.client.cancel_token()) } + /// Returns a builder for a transaction with custom settings. + /// + /// Unlike the `transaction` method, the builder can be used to control the transaction's isolation level and other + /// attributes. + /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, IsolationLevel, NoTls}; + /// + /// # fn main() -> Result<(), postgres::Error> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let mut transaction = client.build_transaction() + /// .isolation_level(IsolationLevel::RepeatableRead) + /// .start()?; + /// transaction.execute("UPDATE foo SET bar = 10", &[])?; + /// // ... + /// + /// transaction.commit()?; + /// # Ok(()) + /// # } + /// ``` + pub fn build_transaction(&mut self) -> TransactionBuilder<'_> { + TransactionBuilder::new(&mut self.runtime, self.client.build_transaction()) + } + /// Determines if the client's connection has already closed. /// /// If this returns `true`, the client is no longer usable. diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 1bc8a8834..ef669f7dc 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -51,7 +51,8 @@ pub use fallible_iterator; pub use tokio_postgres::{ - error, row, tls, types, Column, Portal, SimpleQueryMessage, Socket, Statement, ToStatement, + error, row, tls, types, Column, IsolationLevel, Portal, SimpleQueryMessage, Socket, Statement, + ToStatement, }; pub use crate::cancel_token::CancelToken; @@ -68,6 +69,7 @@ pub use crate::row_iter::RowIter; #[doc(no_inline)] pub use crate::tls::NoTls; pub use crate::transaction::*; +pub use crate::transaction_builder::TransactionBuilder; pub mod binary_copy; mod cancel_token; @@ -79,6 +81,7 @@ mod generic_client; mod lazy_pin; mod row_iter; mod transaction; +mod transaction_builder; #[cfg(test)] mod test; diff --git a/postgres/src/transaction_builder.rs b/postgres/src/transaction_builder.rs new file mode 100644 index 000000000..6e0c525b7 --- /dev/null +++ b/postgres/src/transaction_builder.rs @@ -0,0 +1,47 @@ +use crate::{Error, IsolationLevel, Transaction}; +use tokio::runtime::Runtime; + +/// A builder for database transactions. +pub struct TransactionBuilder<'a> { + runtime: &'a mut Runtime, + builder: tokio_postgres::TransactionBuilder<'a>, +} + +impl<'a> TransactionBuilder<'a> { + pub(crate) fn new( + runtime: &'a mut Runtime, + builder: tokio_postgres::TransactionBuilder<'a>, + ) -> TransactionBuilder<'a> { + TransactionBuilder { runtime, builder } + } + + /// Sets the isolation level of the transaction. + pub fn isolation_level(mut self, isolation_level: IsolationLevel) -> Self { + self.builder = self.builder.isolation_level(isolation_level); + self + } + + /// Sets the transaction to read-only. 
+ pub fn read_only(mut self) -> Self { + self.builder = self.builder.read_only(); + self + } + + /// Sets the transaction to be deferrable. + /// + /// If the transaction is also serializable and read only, creation of the transaction may block, but when it + /// completes the transaction is able to run with less overhead and a guarantee that it will not be aborted due to + /// serialization failure. + pub fn deferrable(mut self) -> Self { + self.builder = self.builder.deferrable(); + self + } + + /// Begins the transaction. + /// + /// The transaction will roll back by default - use the `commit` method to commit it. + pub fn start(self) -> Result, Error> { + let transaction = self.runtime.block_on(self.builder.start())?; + Ok(Transaction::new(self.runtime, transaction)) + } +} diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 50cca9ddd..955435cd6 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -13,6 +13,7 @@ use crate::Socket; use crate::{ copy_in, copy_out, prepare, query, simple_query, slice_iter, CancelToken, CopyInSink, Error, GenericClient, Row, SimpleQueryMessage, Statement, ToStatement, Transaction, + TransactionBuilder, }; use async_trait::async_trait; use bytes::{Buf, BytesMut}; @@ -461,6 +462,14 @@ impl Client { } } + /// Returns a builder for a transaction with custom settings. + /// + /// Unlike the `transaction` method, the builder can be used to control the transaction's isolation level and other + /// attributes. + pub fn build_transaction(&mut self) -> TransactionBuilder<'_> { + TransactionBuilder::new(self) + } + /// Attempts to cancel an in-progress query. /// /// The server provides no information about whether a cancellation attempt was successful or not. An error will diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 984284248..0d8aa8436 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -120,6 +120,7 @@ use crate::tls::MakeTlsConnect; pub use crate::tls::NoTls; pub use crate::to_statement::ToStatement; pub use crate::transaction::Transaction; +pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder}; use crate::types::ToSql; pub mod binary_copy; @@ -154,6 +155,7 @@ mod statement; pub mod tls; mod to_statement; mod transaction; +mod transaction_builder; pub mod types; /// A convenience function which parses a connection string and connects to the database. diff --git a/tokio-postgres/src/transaction_builder.rs b/tokio-postgres/src/transaction_builder.rs new file mode 100644 index 000000000..6ed7e0dd7 --- /dev/null +++ b/tokio-postgres/src/transaction_builder.rs @@ -0,0 +1,103 @@ +use crate::{Client, Error, Transaction}; + +/// The isolation level of a database transaction. +#[derive(Debug, Copy, Clone)] +#[non_exhaustive] +pub enum IsolationLevel { + /// Equivalent to `ReadCommitted`. + ReadUncommitted, + + /// An individual statement in the transaction will see rows committed before it began. + ReadCommitted, + + /// All statements in the transaction will see the same view of rows committed before the first query in the + /// transaction. + RepeatableRead, + + /// The reads and writes in this transaction must be able to be committed as an atomic "unit" with respect to reads + /// and writes of all other concurrent serializable transactions without interleaving. + Serializable, +} + +/// A builder for database transactions. 
+pub struct TransactionBuilder<'a> { + client: &'a mut Client, + isolation_level: Option, + read_only: bool, + deferrable: bool, +} + +impl<'a> TransactionBuilder<'a> { + pub(crate) fn new(client: &'a mut Client) -> TransactionBuilder<'a> { + TransactionBuilder { + client, + isolation_level: None, + read_only: false, + deferrable: false, + } + } + + /// Sets the isolation level of the transaction. + pub fn isolation_level(mut self, isolation_level: IsolationLevel) -> Self { + self.isolation_level = Some(isolation_level); + self + } + + /// Sets the transaction to read-only. + pub fn read_only(mut self) -> Self { + self.read_only = true; + self + } + + /// Sets the transaction to be deferrable. + /// + /// If the transaction is also serializable and read only, creation of the transaction may block, but when it + /// completes the transaction is able to run with less overhead and a guarantee that it will not be aborted due to + /// serialization failure. + pub fn deferrable(mut self) -> Self { + self.deferrable = true; + self + } + + /// Begins the transaction. + /// + /// The transaction will roll back by default - use the `commit` method to commit it. + pub async fn start(self) -> Result, Error> { + let mut query = "START TRANSACTION".to_string(); + let mut first = true; + + if let Some(level) = self.isolation_level { + first = false; + + query.push_str(" ISOLATION LEVEL "); + let level = match level { + IsolationLevel::ReadUncommitted => "READ UNCOMMITTED", + IsolationLevel::ReadCommitted => "READ COMMITTED", + IsolationLevel::RepeatableRead => "REPEATABLE READ", + IsolationLevel::Serializable => "SERIALIZABLE", + }; + query.push_str(level); + } + + if self.read_only { + if !first { + query.push(','); + } + first = false; + + query.push_str(" READ ONLY"); + } + + if self.deferrable { + if !first { + query.push(','); + } + + query.push_str(" DEFERRABLE"); + } + + self.client.batch_execute(&query).await?; + + Ok(Transaction::new(self.client)) + } +} diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 37ee03516..805a76a9e 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -12,7 +12,9 @@ use tokio::time; use tokio_postgres::error::SqlState; use tokio_postgres::tls::{NoTls, NoTlsStream}; use tokio_postgres::types::{Kind, Type}; -use tokio_postgres::{AsyncMessage, Client, Config, Connection, Error, SimpleQueryMessage}; +use tokio_postgres::{ + AsyncMessage, Client, Config, Connection, Error, IsolationLevel, SimpleQueryMessage, +}; mod binary_copy; mod parse; @@ -398,6 +400,41 @@ async fn transaction_rollback_drop() { assert_eq!(rows.len(), 0); } +#[tokio::test] +async fn transaction_builder() { + let mut client = connect("user=postgres").await; + + client + .batch_execute( + "CREATE TEMPORARY TABLE foo( + id SERIAL, + name TEXT + )", + ) + .await + .unwrap(); + + let transaction = client + .build_transaction() + .isolation_level(IsolationLevel::Serializable) + .read_only() + .deferrable() + .start() + .await + .unwrap(); + transaction + .batch_execute("INSERT INTO foo (name) VALUES ('steven')") + .await + .unwrap(); + transaction.commit().await.unwrap(); + + let stmt = client.prepare("SELECT name FROM foo").await.unwrap(); + let rows = client.query(&stmt, &[]).await.unwrap(); + + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, &str>(0), "steven"); +} + #[tokio::test] async fn copy_in() { let client = connect("user=postgres").await; From f1ac3bd9d7dd141e435e3746e4b33f2bb4fa27e5 Mon Sep 17 00:00:00 2001 From: 
Steven Fackler Date: Wed, 8 Jan 2020 17:29:45 -0800 Subject: [PATCH 370/819] Always include Client::configure --- postgres/src/client.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index ec973229d..bbe4a8bca 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -58,9 +58,6 @@ impl Client { } /// Returns a new `Config` object which can be used to configure and connect to a database. - /// - /// Requires the `runtime` Cargo feature (enabled by default). - #[cfg(feature = "runtime")] pub fn configure() -> Config { Config::new() } From a54ee297049b96bf333e26819822681eca1ef42c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 10 Jan 2020 17:49:18 -0800 Subject: [PATCH 371/819] Allow opt-out of transaction settings If you changed the default transaction settings of the session, you may need an explicit opt-out. --- postgres/src/transaction_builder.rs | 12 +++---- tokio-postgres/src/transaction_builder.rs | 38 ++++++++++++++--------- tokio-postgres/tests/test/main.rs | 4 +-- 3 files changed, 32 insertions(+), 22 deletions(-) diff --git a/postgres/src/transaction_builder.rs b/postgres/src/transaction_builder.rs index 6e0c525b7..d87d1a128 100644 --- a/postgres/src/transaction_builder.rs +++ b/postgres/src/transaction_builder.rs @@ -21,19 +21,19 @@ impl<'a> TransactionBuilder<'a> { self } - /// Sets the transaction to read-only. - pub fn read_only(mut self) -> Self { - self.builder = self.builder.read_only(); + /// Sets the access mode of the transaction. + pub fn read_only(mut self, read_only: bool) -> Self { + self.builder = self.builder.read_only(read_only); self } - /// Sets the transaction to be deferrable. + /// Sets the deferrability of the transaction. /// /// If the transaction is also serializable and read only, creation of the transaction may block, but when it /// completes the transaction is able to run with less overhead and a guarantee that it will not be aborted due to /// serialization failure. - pub fn deferrable(mut self) -> Self { - self.builder = self.builder.deferrable(); + pub fn deferrable(mut self, deferrable: bool) -> Self { + self.builder = self.builder.deferrable(deferrable); self } diff --git a/tokio-postgres/src/transaction_builder.rs b/tokio-postgres/src/transaction_builder.rs index 6ed7e0dd7..9718ac588 100644 --- a/tokio-postgres/src/transaction_builder.rs +++ b/tokio-postgres/src/transaction_builder.rs @@ -23,8 +23,8 @@ pub enum IsolationLevel { pub struct TransactionBuilder<'a> { client: &'a mut Client, isolation_level: Option, - read_only: bool, - deferrable: bool, + read_only: Option, + deferrable: Option, } impl<'a> TransactionBuilder<'a> { @@ -32,8 +32,8 @@ impl<'a> TransactionBuilder<'a> { TransactionBuilder { client, isolation_level: None, - read_only: false, - deferrable: false, + read_only: None, + deferrable: None, } } @@ -43,19 +43,19 @@ impl<'a> TransactionBuilder<'a> { self } - /// Sets the transaction to read-only. - pub fn read_only(mut self) -> Self { - self.read_only = true; + /// Sets the access mode of the transaction. + pub fn read_only(mut self, read_only: bool) -> Self { + self.read_only = Some(read_only); self } - /// Sets the transaction to be deferrable. + /// Sets the deferrability of the transaction. /// /// If the transaction is also serializable and read only, creation of the transaction may block, but when it /// completes the transaction is able to run with less overhead and a guarantee that it will not be aborted due to /// serialization failure. 
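A sketch of the opt-out this patch enables: when the session default has been changed (for example to read-only transactions), the builder can now state the desired mode explicitly rather than only being able to opt in. `foo` is a hypothetical table and the snippet is illustrative only:

```rust
use tokio_postgres::{Client, Error, IsolationLevel};

async fn writable_transaction(client: &mut Client) -> Result<(), Error> {
    // Suppose the session was previously made read-only by default.
    client
        .batch_execute("SET default_transaction_read_only = on")
        .await?;

    // Issues `START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE`,
    // overriding the session default for this one transaction.
    let transaction = client
        .build_transaction()
        .isolation_level(IsolationLevel::RepeatableRead)
        .read_only(false)
        .start()
        .await?;

    transaction
        .batch_execute("UPDATE foo SET bar = bar + 1")
        .await?;
    transaction.commit().await
}
```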
- pub fn deferrable(mut self) -> Self { - self.deferrable = true; + pub fn deferrable(mut self, deferrable: bool) -> Self { + self.deferrable = Some(deferrable); self } @@ -79,21 +79,31 @@ impl<'a> TransactionBuilder<'a> { query.push_str(level); } - if self.read_only { + if let Some(read_only) = self.read_only { if !first { query.push(','); } first = false; - query.push_str(" READ ONLY"); + let s = if read_only { + " READ ONLY" + } else { + " READ WRITE" + }; + query.push_str(s); } - if self.deferrable { + if let Some(deferrable) = self.deferrable { if !first { query.push(','); } - query.push_str(" DEFERRABLE"); + let s = if deferrable { + " DEFERRABLE" + } else { + " NOT DEFERRABLE" + }; + query.push_str(s); } self.client.batch_execute(&query).await?; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 805a76a9e..92f1edce6 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -417,8 +417,8 @@ async fn transaction_builder() { let transaction = client .build_transaction() .isolation_level(IsolationLevel::Serializable) - .read_only() - .deferrable() + .read_only(true) + .deferrable(true) .start() .await .unwrap(); From afc9b2835d84aed76ae6f64d11b507e34e775585 Mon Sep 17 00:00:00 2001 From: dvic Date: Mon, 27 Jan 2020 10:20:30 +0100 Subject: [PATCH 372/819] Add missing methods to GenericClient This adds `execute_raw`, `query_one`, `query_opt`, `query_raw`, and `prepare_typed` to the generic trait introduced in #525. --- tokio-postgres/src/client.rs | 57 ++++++++++++++++++++++++++++ tokio-postgres/src/generic_client.rs | 42 +++++++++++++++++++- tokio-postgres/src/transaction.rs | 57 ++++++++++++++++++++++++++++ 3 files changed, 155 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 955435cd6..f87e11ba3 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -506,6 +506,7 @@ impl Client { #[async_trait] impl GenericClient for Client { + /// Like `Client::execute`. async fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -513,6 +514,17 @@ impl GenericClient for Client { self.execute(query, params).await } + /// Like `Client::execute_raw`. + async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator, + { + self.execute_raw(statement, params).await + } + + /// Like `Client::query`. async fn query( &mut self, query: &T, @@ -524,10 +536,55 @@ impl GenericClient for Client { self.query(query, params).await } + /// Like `Client::query_one`. + async fn query_one( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query_one(statement, params).await + } + + /// Like `Client::query_opt`. + async fn query_opt( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query_opt(statement, params).await + } + + /// Like `Client::query_raw`. + async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator, + { + self.query_raw(statement, params).await + } + + /// Like `Client::prepare`. 
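Of the methods being added to the trait, prepare_typed is the least obvious; a small sketch of what it buys, assuming a connected Client (the table and column names are made up):

    use tokio_postgres::types::Type;
    use tokio_postgres::{Client, Error};

    // Sketch: pin $1 to TEXT instead of letting the server infer the
    // parameter type. The users table and its columns are hypothetical.
    async fn find_user_id(client: &Client, name: &str) -> Result<i32, Error> {
        let stmt = client
            .prepare_typed("SELECT id FROM users WHERE name = $1", &[Type::TEXT])
            .await?;
        let row = client.query_one(&stmt, &[&name]).await?;
        Ok(row.get(0))
    }
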
async fn prepare(&mut self, query: &str) -> Result { self.prepare(query).await } + /// Like `Client::prepare_typed`. + async fn prepare_typed( + &self, + query: &str, + parameter_types: &[Type], + ) -> Result { + self.prepare_typed(query, parameter_types).await + } + + /// Like `Client::transaction`. async fn transaction(&mut self) -> Result, Error> { self.transaction().await } diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 949ad4f06..534737374 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -1,4 +1,5 @@ -use crate::types::ToSql; +use crate::query::RowStream; +use crate::types::{ToSql, Type}; use crate::{Error, Row, Statement, ToStatement, Transaction}; use async_trait::async_trait; @@ -10,6 +11,13 @@ pub trait GenericClient { where T: ?Sized + ToStatement + Sync + Send; + /// Like `Client::execute_raw`. + async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator; + /// Like `Client::query`. async fn query( &mut self, @@ -19,9 +27,41 @@ pub trait GenericClient { where T: ?Sized + ToStatement + Sync + Send; + /// Like `Client::query_one`. + async fn query_one( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result + where + T: ?Sized + ToStatement + Sync + Send; + + /// Like `Client::query_opt`. + async fn query_opt( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send; + + /// Like `Client::query_raw`. + async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator; + /// Like `Client::prepare`. async fn prepare(&mut self, query: &str) -> Result; + /// Like `Client::prepare_typed`. + async fn prepare_typed( + &self, + query: &str, + parameter_types: &[Type], + ) -> Result; + /// Like `Client::transaction`. async fn transaction(&mut self) -> Result, Error>; } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 75a939302..b165cbedc 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -289,6 +289,7 @@ impl<'a> Transaction<'a> { #[async_trait] impl crate::GenericClient for Transaction<'_> { + /// Like `Transaction::execute`. async fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -296,6 +297,17 @@ impl crate::GenericClient for Transaction<'_> { self.execute(query, params).await } + /// Like `Transaction::execute_raw`. + async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator, + { + self.execute_raw(statement, params).await + } + + /// Like `Transaction::query`. async fn query( &mut self, query: &T, @@ -307,10 +319,55 @@ impl crate::GenericClient for Transaction<'_> { self.query(query, params).await } + /// Like `Transaction::query_one`. + async fn query_one( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query_one(statement, params).await + } + + /// Like `Transaction::query_opt`. 
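For completeness, the difference between the two row-returning additions, query_one and query_opt, in a short sketch (table and column names are made up):

    use tokio_postgres::{Client, Error};

    // Sketch: query_opt yields Ok(None) when no row matches, whereas
    // query_one treats zero rows as an error. Names are hypothetical.
    async fn email_for(client: &Client, user_id: i32) -> Result<Option<String>, Error> {
        let row = client
            .query_opt("SELECT email FROM users WHERE id = $1", &[&user_id])
            .await?;
        Ok(row.map(|row| row.get(0)))
    }
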
+ async fn query_opt( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query_opt(statement, params).await + } + + /// Like `Transaction::query_raw`. + async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator, + { + self.query_raw(statement, params).await + } + + /// Like `Transaction::prepare`. async fn prepare(&mut self, query: &str) -> Result { self.prepare(query).await } + /// Like `Transaction::prepare_typed`. + async fn prepare_typed( + &self, + query: &str, + parameter_types: &[Type], + ) -> Result { + self.prepare_typed(query, parameter_types).await + } + + /// Like `Transaction::transaction`. #[allow(clippy::needless_lifetimes)] async fn transaction<'a>(&'a mut self) -> Result, Error> { self.transaction().await From 1ea8b7b2d467b2da49c94de522dd6ec054b5d159 Mon Sep 17 00:00:00 2001 From: dvic Date: Mon, 27 Jan 2020 11:14:27 +0100 Subject: [PATCH 373/819] Fix self references in GenericClient --- tokio-postgres/src/client.rs | 10 +++------- tokio-postgres/src/generic_client.rs | 10 +++------- tokio-postgres/src/transaction.rs | 10 +++------- 3 files changed, 9 insertions(+), 21 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index f87e11ba3..96e724d48 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -507,7 +507,7 @@ impl Client { #[async_trait] impl GenericClient for Client { /// Like `Client::execute`. - async fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, { @@ -525,11 +525,7 @@ impl GenericClient for Client { } /// Like `Client::query`. - async fn query( - &mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> + async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement + Sync + Send, { @@ -571,7 +567,7 @@ impl GenericClient for Client { } /// Like `Client::prepare`. - async fn prepare(&mut self, query: &str) -> Result { + async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 534737374..cfbd83e93 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -7,7 +7,7 @@ use async_trait::async_trait; #[async_trait] pub trait GenericClient { /// Like `Client::execute`. - async fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send; @@ -19,11 +19,7 @@ pub trait GenericClient { I::IntoIter: ExactSizeIterator; /// Like `Client::query`. - async fn query( - &mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> + async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement + Sync + Send; @@ -53,7 +49,7 @@ pub trait GenericClient { I::IntoIter: ExactSizeIterator; /// Like `Client::prepare`. - async fn prepare(&mut self, query: &str) -> Result; + async fn prepare(&self, query: &str) -> Result; /// Like `Client::prepare_typed`. 
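With the receivers corrected to &self, the trait can be used the way it was intended: one helper that runs against either a Client or a Transaction. A rough sketch (the table name is made up):

    use tokio_postgres::{Error, GenericClient, Row};

    // Sketch: compiles for both Client and Transaction<'_>, since both
    // implement GenericClient. The jobs table is hypothetical.
    async fn load_pending<C: GenericClient>(client: &C) -> Result<Vec<Row>, Error> {
        client
            .query("SELECT * FROM jobs WHERE state = 'pending'", &[])
            .await
    }

Either load_pending(&client) or load_pending(&transaction) then works without duplicating the query code.
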
async fn prepare_typed( diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index b165cbedc..6d02ec523 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -290,7 +290,7 @@ impl<'a> Transaction<'a> { #[async_trait] impl crate::GenericClient for Transaction<'_> { /// Like `Transaction::execute`. - async fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, { @@ -308,11 +308,7 @@ impl crate::GenericClient for Transaction<'_> { } /// Like `Transaction::query`. - async fn query( - &mut self, - query: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> + async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement + Sync + Send, { @@ -354,7 +350,7 @@ impl crate::GenericClient for Transaction<'_> { } /// Like `Transaction::prepare`. - async fn prepare(&mut self, query: &str) -> Result { + async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } From 5d08af01ec520cba1a8642cb7c66ce070b03f4ca Mon Sep 17 00:00:00 2001 From: dvic Date: Mon, 27 Jan 2020 15:25:02 +0100 Subject: [PATCH 374/819] Remove unnecessary impl GenericClient docs --- tokio-postgres/src/client.rs | 9 --------- tokio-postgres/src/transaction.rs | 9 --------- 2 files changed, 18 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 96e724d48..78f295e30 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -506,7 +506,6 @@ impl Client { #[async_trait] impl GenericClient for Client { - /// Like `Client::execute`. async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -514,7 +513,6 @@ impl GenericClient for Client { self.execute(query, params).await } - /// Like `Client::execute_raw`. async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -524,7 +522,6 @@ impl GenericClient for Client { self.execute_raw(statement, params).await } - /// Like `Client::query`. async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement + Sync + Send, @@ -532,7 +529,6 @@ impl GenericClient for Client { self.query(query, params).await } - /// Like `Client::query_one`. async fn query_one( &self, statement: &T, @@ -544,7 +540,6 @@ impl GenericClient for Client { self.query_one(statement, params).await } - /// Like `Client::query_opt`. async fn query_opt( &self, statement: &T, @@ -556,7 +551,6 @@ impl GenericClient for Client { self.query_opt(statement, params).await } - /// Like `Client::query_raw`. async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -566,12 +560,10 @@ impl GenericClient for Client { self.query_raw(statement, params).await } - /// Like `Client::prepare`. async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } - /// Like `Client::prepare_typed`. async fn prepare_typed( &self, query: &str, @@ -580,7 +572,6 @@ impl GenericClient for Client { self.prepare_typed(query, parameter_types).await } - /// Like `Client::transaction`. 
async fn transaction(&mut self) -> Result, Error> { self.transaction().await } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 6d02ec523..c75087377 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -289,7 +289,6 @@ impl<'a> Transaction<'a> { #[async_trait] impl crate::GenericClient for Transaction<'_> { - /// Like `Transaction::execute`. async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -297,7 +296,6 @@ impl crate::GenericClient for Transaction<'_> { self.execute(query, params).await } - /// Like `Transaction::execute_raw`. async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -307,7 +305,6 @@ impl crate::GenericClient for Transaction<'_> { self.execute_raw(statement, params).await } - /// Like `Transaction::query`. async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement + Sync + Send, @@ -315,7 +312,6 @@ impl crate::GenericClient for Transaction<'_> { self.query(query, params).await } - /// Like `Transaction::query_one`. async fn query_one( &self, statement: &T, @@ -327,7 +323,6 @@ impl crate::GenericClient for Transaction<'_> { self.query_one(statement, params).await } - /// Like `Transaction::query_opt`. async fn query_opt( &self, statement: &T, @@ -339,7 +334,6 @@ impl crate::GenericClient for Transaction<'_> { self.query_opt(statement, params).await } - /// Like `Transaction::query_raw`. async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -349,12 +343,10 @@ impl crate::GenericClient for Transaction<'_> { self.query_raw(statement, params).await } - /// Like `Transaction::prepare`. async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } - /// Like `Transaction::prepare_typed`. async fn prepare_typed( &self, query: &str, @@ -363,7 +355,6 @@ impl crate::GenericClient for Transaction<'_> { self.prepare_typed(query, parameter_types).await } - /// Like `Transaction::transaction`. #[allow(clippy::needless_lifetimes)] async fn transaction<'a>(&'a mut self) -> Result, Error> { self.transaction().await From 7ea1b2d7852f7fa7ab662cfb512128bcc8972353 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Fri, 31 Jan 2020 00:03:31 -0500 Subject: [PATCH 375/819] Don't suppress notices during startup flow NoticeResponses received during the startup flow were previously being dropped on the floor. Instead stash them away so they can be delivered to the user after the startup flow is complete. 
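The user-visible effect is simply more AsyncMessage::Notice values coming out of Connection::poll_message; a sketch of draining them, assuming a Connection obtained from a connect_raw-style setup as in the test below:

    use futures::{pin_mut, stream, StreamExt};
    use tokio_postgres::tls::NoTlsStream;
    use tokio_postgres::{AsyncMessage, Connection, Socket};

    // Sketch: drive the connection and print every notice it reports,
    // including any that were stashed during the startup flow.
    async fn print_notices(mut connection: Connection<Socket, NoTlsStream>) {
        let messages = stream::poll_fn(move |cx| connection.poll_message(cx));
        pin_mut!(messages);
        while let Some(message) = messages.next().await {
            match message {
                Ok(AsyncMessage::Notice(notice)) => println!("notice: {}", notice.message()),
                Ok(_) => {}
                Err(e) => {
                    eprintln!("connection error: {}", e);
                    break;
                }
            }
        }
    }
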
--- tokio-postgres/src/connect_raw.rs | 10 +++++--- tokio-postgres/src/connection.rs | 9 +++---- tokio-postgres/tests/test/main.rs | 39 +++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 7 deletions(-) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 7ccfe9b5c..d07d5a2df 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -13,7 +13,7 @@ use postgres_protocol::authentication::sasl; use postgres_protocol::authentication::sasl::ScramSha256; use postgres_protocol::message::backend::{AuthenticationSaslBody, Message}; use postgres_protocol::message::frontend; -use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; @@ -23,6 +23,7 @@ use tokio_util::codec::Framed; pub struct StartupStream { inner: Framed, PostgresCodec>, buf: BackendMessages, + delayed: VecDeque, } impl Sink for StartupStream @@ -91,6 +92,7 @@ where let mut stream = StartupStream { inner: Framed::new(stream, PostgresCodec), buf: BackendMessages::empty(), + delayed: VecDeque::new(), }; startup(&mut stream, config).await?; @@ -99,7 +101,7 @@ where let (sender, receiver) = mpsc::unbounded(); let client = Client::new(sender, config.ssl_mode, process_id, secret_key); - let connection = Connection::new(stream.inner, parameters, receiver); + let connection = Connection::new(stream.inner, stream.delayed, parameters, receiver); Ok((client, connection)) } @@ -332,7 +334,9 @@ where body.value().map_err(Error::parse)?.to_string(), ); } - Some(Message::NoticeResponse(_)) => {} + Some(msg @ Message::NoticeResponse(_)) => { + stream.delayed.push_back(BackendMessage::Async(msg)) + } Some(Message::ReadyForQuery(_)) => return Ok((process_id, secret_key, parameters)), Some(Message::ErrorResponse(body)) => return Err(Error::db(body)), Some(_) => return Err(Error::unexpected_message()), diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index 5b0144286..ac1867431 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -52,7 +52,7 @@ pub struct Connection { parameters: HashMap, receiver: mpsc::UnboundedReceiver, pending_request: Option, - pending_response: Option, + pending_responses: VecDeque, responses: VecDeque, state: State, } @@ -64,6 +64,7 @@ where { pub(crate) fn new( stream: Framed, PostgresCodec>, + pending_responses: VecDeque, parameters: HashMap, receiver: mpsc::UnboundedReceiver, ) -> Connection { @@ -72,7 +73,7 @@ where parameters, receiver, pending_request: None, - pending_response: None, + pending_responses, responses: VecDeque::new(), state: State::Active, } @@ -82,7 +83,7 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll>> { - if let Some(message) = self.pending_response.take() { + if let Some(message) = self.pending_responses.pop_front() { trace!("retrying pending response"); return Poll::Ready(Some(Ok(message))); } @@ -158,7 +159,7 @@ where } Poll::Pending => { self.responses.push_front(response); - self.pending_response = Some(BackendMessage::Normal { + self.pending_responses.push_back(BackendMessage::Normal { messages, request_complete, }); diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 92f1edce6..738601159 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -570,6 +570,45 @@ async fn copy_out() { assert_eq!(&data[..], b"1\tjim\n2\tjoe\n"); } +#[tokio::test] +async fn notices() { + let long_name = "x".repeat(65); + 
let (client, mut connection) = + connect_raw(&format!("user=postgres application_name={}", long_name,)) + .await + .unwrap(); + + let (tx, rx) = mpsc::unbounded(); + let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!(e)); + let connection = stream.forward(tx).map(|r| r.unwrap()); + tokio::spawn(connection); + + client + .batch_execute("DROP DATABASE IF EXISTS noexistdb") + .await + .unwrap(); + + drop(client); + + let notices = rx + .filter_map(|m| match m { + AsyncMessage::Notice(n) => future::ready(Some(n)), + _ => future::ready(None), + }) + .collect::>() + .await; + assert_eq!(notices.len(), 2); + assert_eq!( + notices[0].message(), + "identifier \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" \ + will be truncated to \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"" + ); + assert_eq!( + notices[1].message(), + "database \"noexistdb\" does not exist, skipping" + ); +} + #[tokio::test] async fn notifications() { let (client, mut connection) = connect_raw("user=postgres").await.unwrap(); From 27635a20cac4f54dae31507c57bfae939e63a842 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 31 Jan 2020 13:59:37 -0800 Subject: [PATCH 376/819] Clean up GenericClient traits --- postgres/src/client.rs | 28 +--- postgres/src/generic_client.rs | 213 ++++++++++++++++++++++++++- postgres/src/transaction.rs | 27 +--- tokio-postgres/src/client.rs | 77 +--------- tokio-postgres/src/generic_client.rs | 161 +++++++++++++++++++- tokio-postgres/src/transaction.rs | 75 ---------- 6 files changed, 373 insertions(+), 208 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index bbe4a8bca..908345085 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,6 +1,6 @@ use crate::{ - CancelToken, Config, CopyInWriter, CopyOutReader, GenericClient, RowIter, Statement, - ToStatement, Transaction, TransactionBuilder, + CancelToken, Config, CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction, + TransactionBuilder, }; use std::ops::{Deref, DerefMut}; use tokio::runtime::Runtime; @@ -517,27 +517,3 @@ impl Client { self.client.is_closed() } } - -impl GenericClient for Client { - fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result - where - T: ?Sized + ToStatement, - { - self.execute(query, params) - } - - fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> - where - T: ?Sized + ToStatement, - { - self.query(query, params) - } - - fn prepare(&mut self, query: &str) -> Result { - self.prepare(query) - } - - fn transaction(&mut self) -> Result, Error> { - self.transaction() - } -} diff --git a/postgres/src/generic_client.rs b/postgres/src/generic_client.rs index b586d0d4d..42a466df6 100644 --- a/postgres/src/generic_client.rs +++ b/postgres/src/generic_client.rs @@ -1,8 +1,17 @@ -use crate::types::ToSql; -use crate::{Error, Row, Statement, ToStatement, Transaction}; +use crate::types::{ToSql, Type}; +use crate::{ + Client, CopyInWriter, CopyOutReader, Error, Row, RowIter, SimpleQueryMessage, Statement, + ToStatement, Transaction, +}; + +mod private { + pub trait Sealed {} +} /// A trait allowing abstraction over connections and transactions. -pub trait GenericClient { +/// +/// This trait is "sealed", and cannot be implemented outside of this crate. +pub trait GenericClient: private::Sealed { /// Like `Client::execute`. 
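The private::Sealed supertrait is the standard sealing idiom; a stripped-down sketch of the pattern in isolation (the names here are illustrative, not from the crate):

    // Sketch of the sealing pattern: downstream crates can use the public
    // trait, but cannot implement it, because the supertrait lives in a
    // private module they cannot name.
    mod private {
        pub trait Sealed {}
    }

    pub trait Greeter: private::Sealed {
        fn greet(&self) -> String;
    }

    pub struct English;

    impl private::Sealed for English {}

    impl Greeter for English {
        fn greet(&self) -> String {
            "hello".to_string()
        }
    }
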
fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where @@ -13,9 +22,207 @@ pub trait GenericClient { where T: ?Sized + ToStatement; + /// Like `Client::query_one`. + fn query_one(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement; + + /// Like `Client::query_opt`. + fn query_opt( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement; + + /// Like `Client::query_raw`. + fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + where + T: ?Sized + ToStatement, + I: IntoIterator, + I::IntoIter: ExactSizeIterator; + /// Like `Client::prepare`. fn prepare(&mut self, query: &str) -> Result; + /// Like `Client::prepare_typed`. + fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result; + + /// Like `Client::copy_in`. + fn copy_in(&mut self, query: &T) -> Result, Error> + where + T: ?Sized + ToStatement; + + /// Like `Client::copy_out`. + fn copy_out(&mut self, query: &T) -> Result, Error> + where + T: ?Sized + ToStatement; + + /// Like `Client::simple_query`. + fn simple_query(&mut self, query: &str) -> Result, Error>; + + /// Like `Client::batch_execute`. + fn batch_execute(&mut self, query: &str) -> Result<(), Error>; + /// Like `Client::transaction`. fn transaction(&mut self) -> Result, Error>; } + +impl private::Sealed for Client {} + +impl GenericClient for Client { + fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement, + { + self.execute(query, params) + } + + fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.query(query, params) + } + + fn query_one(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement, + { + self.query_one(query, params) + } + + fn query_opt( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.query_opt(query, params) + } + + fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + where + T: ?Sized + ToStatement, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + self.query_raw(query, params) + } + + fn prepare(&mut self, query: &str) -> Result { + self.prepare(query) + } + + fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { + self.prepare_typed(query, types) + } + + fn copy_in(&mut self, query: &T) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.copy_in(query) + } + + fn copy_out(&mut self, query: &T) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.copy_out(query) + } + + fn simple_query(&mut self, query: &str) -> Result, Error> { + self.simple_query(query) + } + + fn batch_execute(&mut self, query: &str) -> Result<(), Error> { + self.batch_execute(query) + } + + fn transaction(&mut self) -> Result, Error> { + self.transaction() + } +} + +impl private::Sealed for Transaction<'_> {} + +impl GenericClient for Transaction<'_> { + fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement, + { + self.execute(query, params) + } + + fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.query(query, params) + } + + fn query_one(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement, + { + self.query_one(query, params) + } + + fn 
query_opt( + &mut self, + query: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.query_opt(query, params) + } + + fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + where + T: ?Sized + ToStatement, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + self.query_raw(query, params) + } + + fn prepare(&mut self, query: &str) -> Result { + self.prepare(query) + } + + fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { + self.prepare_typed(query, types) + } + + fn copy_in(&mut self, query: &T) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.copy_in(query) + } + + fn copy_out(&mut self, query: &T) -> Result, Error> + where + T: ?Sized + ToStatement, + { + self.copy_out(query) + } + + fn simple_query(&mut self, query: &str) -> Result, Error> { + self.simple_query(query) + } + + fn batch_execute(&mut self, query: &str) -> Result<(), Error> { + self.batch_execute(query) + } + + fn transaction(&mut self) -> Result, Error> { + self.transaction() + } +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index c1fdcf06b..e5b3682f0 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,6 +1,5 @@ use crate::{ - CancelToken, CopyInWriter, CopyOutReader, GenericClient, Portal, RowIter, Rt, Statement, - ToStatement, + CancelToken, CopyInWriter, CopyOutReader, Portal, RowIter, Rt, Statement, ToStatement, }; use tokio::runtime::Runtime; use tokio_postgres::types::{ToSql, Type}; @@ -185,27 +184,3 @@ impl<'a> Transaction<'a> { }) } } - -impl<'a> GenericClient for Transaction<'a> { - fn execute(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result - where - T: ?Sized + ToStatement, - { - self.execute(query, params) - } - - fn query(&mut self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> - where - T: ?Sized + ToStatement, - { - self.query(query, params) - } - - fn prepare(&mut self, query: &str) -> Result { - self.prepare(query) - } - - fn transaction(&mut self) -> Result, Error> { - self.transaction() - } -} diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 78f295e30..84ebfcbb4 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -12,10 +12,8 @@ use crate::types::{Oid, ToSql, Type}; use crate::Socket; use crate::{ copy_in, copy_out, prepare, query, simple_query, slice_iter, CancelToken, CopyInSink, Error, - GenericClient, Row, SimpleQueryMessage, Statement, ToStatement, Transaction, - TransactionBuilder, + Row, SimpleQueryMessage, Statement, ToStatement, Transaction, TransactionBuilder, }; -use async_trait::async_trait; use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; use futures::channel::mpsc; @@ -503,76 +501,3 @@ impl Client { self.inner.sender.is_closed() } } - -#[async_trait] -impl GenericClient for Client { - async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result - where - T: ?Sized + ToStatement + Sync + Send, - { - self.execute(query, params).await - } - - async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result - where - T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, - I::IntoIter: ExactSizeIterator, - { - self.execute_raw(statement, params).await - } - - async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> - where - T: ?Sized + ToStatement + Sync + Send, - { - self.query(query, params).await - } - - async fn query_one( - &self, - statement: &T, 
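The blocking postgres crate grows the same surface, so the synchronous counterpart of the generic helper looks roughly like this (table name made up; note the sync trait keeps &mut self receivers):

    use postgres::{Error, GenericClient, Row};

    // Sketch: works with both postgres::Client and postgres::Transaction<'_>.
    // The jobs table is hypothetical.
    fn load_pending<C: GenericClient>(client: &mut C) -> Result<Vec<Row>, Error> {
        client.query("SELECT * FROM jobs WHERE state = 'pending'", &[])
    }
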
- params: &[&(dyn ToSql + Sync)], - ) -> Result - where - T: ?Sized + ToStatement + Sync + Send, - { - self.query_one(statement, params).await - } - - async fn query_opt( - &self, - statement: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> - where - T: ?Sized + ToStatement + Sync + Send, - { - self.query_opt(statement, params).await - } - - async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result - where - T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, - I::IntoIter: ExactSizeIterator, - { - self.query_raw(statement, params).await - } - - async fn prepare(&self, query: &str) -> Result { - self.prepare(query).await - } - - async fn prepare_typed( - &self, - query: &str, - parameter_types: &[Type], - ) -> Result { - self.prepare_typed(query, parameter_types).await - } - - async fn transaction(&mut self) -> Result, Error> { - self.transaction().await - } -} diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index cfbd83e93..30351bd0a 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -1,11 +1,17 @@ use crate::query::RowStream; use crate::types::{ToSql, Type}; -use crate::{Error, Row, Statement, ToStatement, Transaction}; +use crate::{Client, Error, Row, Statement, ToStatement, Transaction}; use async_trait::async_trait; +mod private { + pub trait Sealed {} +} + /// A trait allowing abstraction over connections and transactions. +/// +/// This trait is "sealed", and cannot be implemented outside of this crate. #[async_trait] -pub trait GenericClient { +pub trait GenericClient: private::Sealed { /// Like `Client::execute`. async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where @@ -61,3 +67,154 @@ pub trait GenericClient { /// Like `Client::transaction`. 
async fn transaction(&mut self) -> Result, Error>; } + +impl private::Sealed for Client {} + +#[async_trait] +impl GenericClient for Client { + async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + { + self.execute(query, params).await + } + + async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator, + { + self.execute_raw(statement, params).await + } + + async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query(query, params).await + } + + async fn query_one( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query_one(statement, params).await + } + + async fn query_opt( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query_opt(statement, params).await + } + + async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator, + { + self.query_raw(statement, params).await + } + + async fn prepare(&self, query: &str) -> Result { + self.prepare(query).await + } + + async fn prepare_typed( + &self, + query: &str, + parameter_types: &[Type], + ) -> Result { + self.prepare_typed(query, parameter_types).await + } + + async fn transaction(&mut self) -> Result, Error> { + self.transaction().await + } +} + +impl private::Sealed for Transaction<'_> {} + +#[async_trait] +impl GenericClient for Transaction<'_> { + async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + { + self.execute(query, params).await + } + + async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator, + { + self.execute_raw(statement, params).await + } + + async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query(query, params).await + } + + async fn query_one( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query_one(statement, params).await + } + + async fn query_opt( + &self, + statement: &T, + params: &[&(dyn ToSql + Sync)], + ) -> Result, Error> + where + T: ?Sized + ToStatement + Sync + Send, + { + self.query_opt(statement, params).await + } + + async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + where + T: ?Sized + ToStatement + Sync + Send, + I: IntoIterator + Sync + Send, + I::IntoIter: ExactSizeIterator, + { + self.query_raw(statement, params).await + } + + async fn prepare(&self, query: &str) -> Result { + self.prepare(query).await + } + + async fn prepare_typed( + &self, + query: &str, + parameter_types: &[Type], + ) -> Result { + self.prepare_typed(query, parameter_types).await + } + + #[allow(clippy::needless_lifetimes)] + async fn transaction<'a>(&'a mut self) -> Result, Error> { + self.transaction().await + } +} diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index c75087377..a1ee4f6cb 100644 --- 
a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -12,7 +12,6 @@ use crate::{ bind, query, slice_iter, CancelToken, Client, CopyInSink, Error, Portal, Row, SimpleQueryMessage, Statement, ToStatement, }; -use async_trait::async_trait; use bytes::Buf; use futures::TryStreamExt; use postgres_protocol::message::frontend; @@ -286,77 +285,3 @@ impl<'a> Transaction<'a> { }) } } - -#[async_trait] -impl crate::GenericClient for Transaction<'_> { - async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result - where - T: ?Sized + ToStatement + Sync + Send, - { - self.execute(query, params).await - } - - async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result - where - T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, - I::IntoIter: ExactSizeIterator, - { - self.execute_raw(statement, params).await - } - - async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> - where - T: ?Sized + ToStatement + Sync + Send, - { - self.query(query, params).await - } - - async fn query_one( - &self, - statement: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result - where - T: ?Sized + ToStatement + Sync + Send, - { - self.query_one(statement, params).await - } - - async fn query_opt( - &self, - statement: &T, - params: &[&(dyn ToSql + Sync)], - ) -> Result, Error> - where - T: ?Sized + ToStatement + Sync + Send, - { - self.query_opt(statement, params).await - } - - async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result - where - T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, - I::IntoIter: ExactSizeIterator, - { - self.query_raw(statement, params).await - } - - async fn prepare(&self, query: &str) -> Result { - self.prepare(query).await - } - - async fn prepare_typed( - &self, - query: &str, - parameter_types: &[Type], - ) -> Result { - self.prepare_typed(query, parameter_types).await - } - - #[allow(clippy::needless_lifetimes)] - async fn transaction<'a>(&'a mut self) -> Result, Error> { - self.transaction().await - } -} From 4bf40cd1d3eadade2f807fad53a06465d50bf62f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 31 Jan 2020 14:01:16 -0800 Subject: [PATCH 377/819] Reorder methods --- postgres/src/client.rs | 54 ++++++++++++++++++------------------ tokio-postgres/src/client.rs | 16 +++++------ 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 908345085..fb76f0f3c 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -443,6 +443,33 @@ impl Client { Ok(Transaction::new(&mut self.runtime, transaction)) } + /// Returns a builder for a transaction with custom settings. + /// + /// Unlike the `transaction` method, the builder can be used to control the transaction's isolation level and other + /// attributes. + /// + /// # Examples + /// + /// ```no_run + /// use postgres::{Client, IsolationLevel, NoTls}; + /// + /// # fn main() -> Result<(), postgres::Error> { + /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let mut transaction = client.build_transaction() + /// .isolation_level(IsolationLevel::RepeatableRead) + /// .start()?; + /// transaction.execute("UPDATE foo SET bar = 10", &[])?; + /// // ... 
+ /// + /// transaction.commit()?; + /// # Ok(()) + /// # } + /// ``` + pub fn build_transaction(&mut self) -> TransactionBuilder<'_> { + TransactionBuilder::new(&mut self.runtime, self.client.build_transaction()) + } + /// Constructs a cancellation token that can later be used to request /// cancellation of a query running on this connection. /// @@ -483,33 +510,6 @@ impl Client { CancelToken::new(self.client.cancel_token()) } - /// Returns a builder for a transaction with custom settings. - /// - /// Unlike the `transaction` method, the builder can be used to control the transaction's isolation level and other - /// attributes. - /// - /// # Examples - /// - /// ```no_run - /// use postgres::{Client, IsolationLevel, NoTls}; - /// - /// # fn main() -> Result<(), postgres::Error> { - /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; - /// - /// let mut transaction = client.build_transaction() - /// .isolation_level(IsolationLevel::RepeatableRead) - /// .start()?; - /// transaction.execute("UPDATE foo SET bar = 10", &[])?; - /// // ... - /// - /// transaction.commit()?; - /// # Ok(()) - /// # } - /// ``` - pub fn build_transaction(&mut self) -> TransactionBuilder<'_> { - TransactionBuilder::new(&mut self.runtime, self.client.build_transaction()) - } - /// Determines if the client's connection has already closed. /// /// If this returns `true`, the client is no longer usable. diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 84ebfcbb4..4cd0b31ce 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -447,6 +447,14 @@ impl Client { Ok(Transaction::new(self)) } + /// Returns a builder for a transaction with custom settings. + /// + /// Unlike the `transaction` method, the builder can be used to control the transaction's isolation level and other + /// attributes. + pub fn build_transaction(&mut self) -> TransactionBuilder<'_> { + TransactionBuilder::new(self) + } + /// Constructs a cancellation token that can later be used to request /// cancellation of a query running on the connection associated with /// this client. @@ -460,14 +468,6 @@ impl Client { } } - /// Returns a builder for a transaction with custom settings. - /// - /// Unlike the `transaction` method, the builder can be used to control the transaction's isolation level and other - /// attributes. - pub fn build_transaction(&mut self) -> TransactionBuilder<'_> { - TransactionBuilder::new(self) - } - /// Attempts to cancel an in-progress query. /// /// The server provides no information about whether a cancellation attempt was successful or not. An error will From d88bccc27c021fcc5727e563a8ff5ec38f403a49 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 31 Jan 2020 18:00:05 -0800 Subject: [PATCH 378/819] Release tokio-postgres v0.5.2 --- tokio-postgres/CHANGELOG.md | 18 ++++++++++++++++++ tokio-postgres/Cargo.toml | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 95bf19bc7..c40f7e133 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,23 @@ # Change Log +## v0.5.2 - 2020-01-31 + +### Fixed + +* Notice messages sent during the initial connection process are now collected and returned first from + `Connection::poll_message`. + +### Deprecated + +* Deprecated `Client::cancel_query` and `Client::cancel_query_raw` in favor of `Client::cancel_token`. 
+ +### Added + +* Added `Client::build_transaction` to allow configuration of various transaction options. +* Added `Client::cancel_token`, which returns a separate owned object that can be used to cancel queries. +* Added accessors for `Client` fields. +* Added a `GenericClient` trait implemented for `Client` and `Transaction` and covering shared functionality. + ## v0.5.1 - 2019-12-25 ### Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index dc2c140ae..2f3dd40c3 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.5.1" +version = "0.5.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From cc9b8232a858099e11f8b9e8632566f9b6103a81 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 31 Jan 2020 18:04:47 -0800 Subject: [PATCH 379/819] Release postgres v0.17.1 --- postgres/CHANGELOG.md | 9 +++++++++ postgres/Cargo.toml | 4 ++-- tokio-postgres/CHANGELOG.md | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 686830db1..988578d46 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,14 @@ # Change Log +## v0.17.1 - 2020-01-31 + +### Added + +* Added `Client::build_transaction` to allow configuration of various transaction options. +* Added `Client::cancel_token`, which returns a separate owned object that can be used to cancel queries. +* Added accessors for `Config` fields. +* Added a `GenericClient` trait implemented for `Client` and `Transaction` and covering shared functionality. + ## v0.17.0 - 2019-12-23 ### Changed diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 41d500325..6ba57b113 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.17.0" +version = "0.17.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -32,7 +32,7 @@ with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.5.0", path = "../tokio-postgres" } +tokio-postgres = { version = "0.5.2", path = "../tokio-postgres" } tokio = { version = "0.2", features = ["rt-core"] } log = "0.4" diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index c40f7e133..bca25f817 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -15,7 +15,7 @@ * Added `Client::build_transaction` to allow configuration of various transaction options. * Added `Client::cancel_token`, which returns a separate owned object that can be used to cancel queries. -* Added accessors for `Client` fields. +* Added accessors for `Config` fields. * Added a `GenericClient` trait implemented for `Client` and `Transaction` and covering shared functionality. 
## v0.5.1 - 2019-12-25 From 778f2780594c6af35f9c22b623c2a6b8ae5dcff5 Mon Sep 17 00:00:00 2001 From: Andreas Runfalk Date: Wed, 5 Feb 2020 11:32:33 +0100 Subject: [PATCH 380/819] Added information on how to use Vec with Client::query_raw (fixes #567) --- postgres/src/client.rs | 27 +++++++++++++++++++++++++++ tokio-postgres/src/client.rs | 28 ++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index fb76f0f3c..0a3a51e1b 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -255,6 +255,33 @@ impl Client { /// # Ok(()) /// # } /// ``` + /// + /// If you have a type like `Vec` where `T: ToSql` Rust will not know how to use it as params. To get around + /// this the type must explicitly be converted to `&dyn ToSql`. + /// + /// ```no_run + /// # use postgres::{Client, NoTls}; + /// use postgres::types::ToSql; + /// use fallible_iterator::FallibleIterator; + /// # fn main() -> Result<(), postgres::Error> { + /// # let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let params: Vec = vec![ + /// "first param".into(), + /// "second param".into(), + /// ]; + /// let mut it = client.query_raw( + /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", + /// params.iter().map(|p| p as &dyn ToSql), + /// )?; + /// + /// while let Some(row) = it.next()? { + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// } + /// # Ok(()) + /// # } + /// ``` pub fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 4cd0b31ce..7f622c568 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -313,6 +313,34 @@ impl Client { /// Panics if the number of parameters provided does not match the number expected. /// /// [`query`]: #method.query + /// + /// # Examples + /// + /// If you have a type like `Vec` where `T: ToSql` Rust will not know how to use it as params. To get around + /// this the type must explicitly be converted to `&dyn ToSql`. + /// + /// ```no_run + /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { + /// use tokio_postgres::types::ToSql; + /// use futures::{pin_mut, StreamExt}; + /// + /// let params: Vec = vec![ + /// "first param".into(), + /// "second param".into(), + /// ]; + /// let mut it = client.query_raw( + /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", + /// params.iter().map(|p| p as &dyn ToSql), + /// ).await?; + /// + /// pin_mut!(it); + /// while let Some(row) = it.next().await.transpose()? 
{ + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// } + /// # Ok(()) + /// # } + /// ``` pub async fn query_raw<'a, T, I>(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, From 4c0ee2c715b9bccf201021cb3cffa906d8aa458a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 9 Feb 2020 09:58:18 -0800 Subject: [PATCH 381/819] Example cleanup --- tokio-postgres/src/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 7f622c568..0687a08e9 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -322,7 +322,7 @@ impl Client { /// ```no_run /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { /// use tokio_postgres::types::ToSql; - /// use futures::{pin_mut, StreamExt}; + /// use futures::{pin_mut, TryStreamExt}; /// /// let params: Vec = vec![ /// "first param".into(), @@ -334,7 +334,7 @@ impl Client { /// ).await?; /// /// pin_mut!(it); - /// while let Some(row) = it.next().await.transpose()? { + /// while let Some(row) = it.try_next().await? { /// let foo: i32 = row.get("foo"); /// println!("foo: {}", foo); /// } From 51dac3c86207b8fa7abe60ed4f935690f72829c5 Mon Sep 17 00:00:00 2001 From: Richard Dodd Date: Mon, 30 Dec 2019 12:36:16 +0000 Subject: [PATCH 382/819] Add Debug impls. This commit makes the following changes - Add an opaque `Debug` impl for `Client`. - Add a rich `Debug` impl for `Row`. - Make the `Debug` impl for `Type` clearer. - Change the `Debug` for `Column` to be slightly neater. --- postgres-types/src/lib.rs | 8 +++- tokio-postgres/src/client.rs | 7 ++++ tokio-postgres/src/row.rs | 65 ++++++++++++++++++++++++++++++++- tokio-postgres/src/statement.rs | 15 +++++++- 4 files changed, 90 insertions(+), 5 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 0b1a0f2c4..f9876e682 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -207,9 +207,15 @@ mod special; mod type_gen; /// A Postgres type. -#[derive(PartialEq, Eq, Clone, Debug, Hash)] +#[derive(PartialEq, Eq, Clone, Hash)] pub struct Type(Inner); +impl fmt::Debug for Type { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.0, fmt) + } +} + impl fmt::Display for Type { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self.schema() { diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 0687a08e9..2d9b79728 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -21,6 +21,7 @@ use futures::{future, pin_mut, ready, StreamExt, TryStreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; use std::collections::HashMap; +use std::fmt; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -529,3 +530,9 @@ impl Client { self.inner.sender.is_closed() } } + +impl fmt::Debug for Client { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Client").finish() + } +} diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 03c7635b2..da313faa8 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -100,6 +100,62 @@ pub struct Row { ranges: Vec>>, } +/// A macro to map pg types to rust types, for debug display. +macro_rules! 
debug_row_type { + ($this:expr; $map:expr; $idx:expr; $name:expr; $type:expr; $($pg_ty:tt => $ty:ty),*) => { + match $type { + $( + &Type::$pg_ty => match <$ty as FromSql>::from_sql_nullable( + &Type::$pg_ty, + $this.0.col_buffer($idx), + ) { + Ok(val) => $map.entry(&$name, &val), + Err(_) => $map.entry(&$name, &""), + }, + )* + _ => $map.entry(&$name, &""), + } + } +} + +impl fmt::Debug for Row { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Provides debug impl for row contents. + struct RowData<'a>(&'a Row); + + impl fmt::Debug for RowData<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut map = f.debug_map(); + for (idx, col) in self.0.columns().iter().enumerate() { + debug_row_type!(self; map; idx; col.name(); col.type_(); + BOOL => bool, + INT2 => i16, + INT4 => i32, + INT8 => i64, + FLOAT4 => f32, + FLOAT8 => f64, + VARCHAR => String, + BPCHAR => String, + TEXT => String, + JSON => String, + XML => String, + TIMESTAMPTZ => std::time::SystemTime, + TIMESTAMP => std::time::SystemTime, + BYTEA => Vec + // More types could be added here. + ); + } + map.finish() + } + } + + f.debug_struct("Row") + .field("columns", &self.columns()) + .field("data", &RowData(self)) + .finish() + } +} + impl Row { pub(crate) fn new(statement: Statement, body: DataRowBody) -> Result { let ranges = body.ranges().collect().map_err(Error::parse)?; @@ -170,8 +226,13 @@ impl Row { )); } - let buf = self.ranges[idx].clone().map(|r| &self.body.buffer()[r]); - FromSql::from_sql_nullable(ty, buf).map_err(|e| Error::from_sql(e, idx)) + FromSql::from_sql_nullable(ty, self.col_buffer(idx)).map_err(|e| Error::from_sql(e, idx)) + } + + /// Get the raw bytes for the column at the given index. + fn col_buffer(&self, idx: usize) -> Option<&[u8]> { + let range = self.ranges[idx].to_owned()?; + Some(&self.body.buffer()[range]) } } diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index d8fa1911f..97561a8e4 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -3,7 +3,10 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::Type; use postgres_protocol::message::frontend; -use std::sync::{Arc, Weak}; +use std::{ + fmt, + sync::{Arc, Weak}, +}; struct StatementInner { client: Weak, @@ -62,7 +65,6 @@ impl Statement { } /// Information about a column of a query. -#[derive(Debug)] pub struct Column { name: String, type_: Type, @@ -83,3 +85,12 @@ impl Column { &self.type_ } } + +impl fmt::Debug for Column { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Column") + .field("name", &self.name) + .field("type", &self.type_) + .finish() + } +} From 369427b38179d15d0ec11f4ee315a16d4aab96e5 Mon Sep 17 00:00:00 2001 From: Richard Dodd Date: Mon, 30 Dec 2019 13:09:11 +0000 Subject: [PATCH 383/819] Allow clippy lint in macro output. 
--- tokio-postgres/src/row.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index da313faa8..05a5117e2 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -124,6 +124,7 @@ impl fmt::Debug for Row { struct RowData<'a>(&'a Row); impl fmt::Debug for RowData<'_> { + #[allow(clippy::match_ref_pats)] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut map = f.debug_map(); for (idx, col) in self.0.columns().iter().enumerate() { From 6fd69dfd474d378ecdeecc77b1e9ec1acbb9a749 Mon Sep 17 00:00:00 2001 From: Richard Dodd Date: Tue, 21 Jan 2020 17:21:05 +0000 Subject: [PATCH 384/819] Make requested changes --- tokio-postgres/src/row.rs | 49 --------------------------------------- 1 file changed, 49 deletions(-) diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 05a5117e2..842216ad2 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -100,59 +100,10 @@ pub struct Row { ranges: Vec>>, } -/// A macro to map pg types to rust types, for debug display. -macro_rules! debug_row_type { - ($this:expr; $map:expr; $idx:expr; $name:expr; $type:expr; $($pg_ty:tt => $ty:ty),*) => { - match $type { - $( - &Type::$pg_ty => match <$ty as FromSql>::from_sql_nullable( - &Type::$pg_ty, - $this.0.col_buffer($idx), - ) { - Ok(val) => $map.entry(&$name, &val), - Err(_) => $map.entry(&$name, &""), - }, - )* - _ => $map.entry(&$name, &""), - } - } -} - impl fmt::Debug for Row { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Provides debug impl for row contents. - struct RowData<'a>(&'a Row); - - impl fmt::Debug for RowData<'_> { - #[allow(clippy::match_ref_pats)] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut map = f.debug_map(); - for (idx, col) in self.0.columns().iter().enumerate() { - debug_row_type!(self; map; idx; col.name(); col.type_(); - BOOL => bool, - INT2 => i16, - INT4 => i32, - INT8 => i64, - FLOAT4 => f32, - FLOAT8 => f64, - VARCHAR => String, - BPCHAR => String, - TEXT => String, - JSON => String, - XML => String, - TIMESTAMPTZ => std::time::SystemTime, - TIMESTAMP => std::time::SystemTime, - BYTEA => Vec - // More types could be added here. 
- ); - } - map.finish() - } - } - f.debug_struct("Row") .field("columns", &self.columns()) - .field("data", &RowData(self)) .finish() } } From 2082d4f781a211f4248cbff026716b5f63bba04a Mon Sep 17 00:00:00 2001 From: Aaron Loucks Date: Fri, 28 Feb 2020 17:58:43 -0500 Subject: [PATCH 385/819] Add support for time-0.2 types --- postgres-types/Cargo.toml | 2 + postgres-types/src/lib.rs | 14 +++++ postgres-types/src/time_02.rs | 109 ++++++++++++++++++++++++++++++++++ postgres/Cargo.toml | 1 + tokio-postgres/Cargo.toml | 1 + 5 files changed, 127 insertions(+) create mode 100644 postgres-types/src/time_02.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 4add13815..a17829cec 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -18,6 +18,7 @@ with-eui48-0_4 = ["eui48-04"] with-geo-types-0_4 = ["geo-types-04"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_8 = ["uuid-08"] +with-time-0_2 = ["time-02"] [dependencies] bytes = "0.5" @@ -32,3 +33,4 @@ geo-types-04 = { version = "0.4", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } +time-02 = { version = "0.2", package = "time", optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index f9876e682..88ab298df 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -198,9 +198,15 @@ mod eui48_04; mod geo_types_04; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; +#[cfg(feature = "with-time-0_2")] +mod time_02; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; +// The time::{date, time} macros produce compile errors if the crate package is renamed. 
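Downstream, enabling the new with-time-0_2 feature makes the time types usable like any other mapped type; a sketch assuming a connected tokio-postgres Client and a hypothetical events table with a TIMESTAMPTZ column:

    use time::OffsetDateTime;
    use tokio_postgres::{Client, Error};

    // Sketch: requires tokio-postgres built with the "with-time-0_2" feature.
    // OffsetDateTime is written as UTC and maps to TIMESTAMP WITH TIME ZONE.
    async fn record_seen(client: &Client, id: i32, seen_at: OffsetDateTime) -> Result<(), Error> {
        client
            .execute("UPDATE events SET seen_at = $1 WHERE id = $2", &[&seen_at, &id])
            .await?;
        Ok(())
    }

    async fn last_seen(client: &Client, id: i32) -> Result<OffsetDateTime, Error> {
        let row = client
            .query_one("SELECT seen_at FROM events WHERE id = $1", &[&id])
            .await?;
        Ok(row.get(0))
    }
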
+#[cfg(feature = "with-time-0_2")] +extern crate time_02 as time; + #[doc(hidden)] pub mod private; mod special; @@ -391,6 +397,10 @@ impl WrongType { /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | +/// | `time::PrimitiveDateTime` | TIMESTAMP | +/// | `time::OffsetDateTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | +/// | `time::Date` | DATE | +/// | `time::Time` | TIME | /// | `eui48::MacAddress` | MACADDR | /// | `geo_types::Point` | POINT | /// | `geo_types::Rect` | BOX | @@ -650,6 +660,10 @@ pub enum IsNull { /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | +/// | `time::PrimitiveDateTime` | TIMESTAMP | +/// | `time::OffsetDateTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | +/// | `time::Date` | DATE | +/// | `time::Time` | TIME | /// | `eui48::MacAddress` | MACADDR | /// | `geo_types::Point` | POINT | /// | `geo_types::Rect` | BOX | diff --git a/postgres-types/src/time_02.rs b/postgres-types/src/time_02.rs new file mode 100644 index 000000000..ce80267ce --- /dev/null +++ b/postgres-types/src/time_02.rs @@ -0,0 +1,109 @@ +use bytes::BytesMut; +use postgres_protocol::types; +use std::convert::TryFrom; +use std::error::Error; +use time_02::{date, time, Date, Duration, OffsetDateTime, PrimitiveDateTime, Time, UtcOffset}; + +use crate::{FromSql, IsNull, ToSql, Type}; + +#[rustfmt::skip] +const fn base() -> PrimitiveDateTime { + PrimitiveDateTime::new(date!(2000-01-01), time!(00:00:00)) +} + +impl<'a> FromSql<'a> for PrimitiveDateTime { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let t = types::timestamp_from_sql(raw)?; + Ok(base() + Duration::microseconds(t)) + } + + accepts!(TIMESTAMP); +} + +impl ToSql for PrimitiveDateTime { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let time = match i64::try_from((*self - base()).whole_microseconds()) { + Ok(time) => time, + Err(_) => return Err("value too large to transmit".into()), + }; + types::timestamp_to_sql(time, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMP); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for OffsetDateTime { + fn from_sql(type_: &Type, raw: &[u8]) -> Result> { + let primitive = PrimitiveDateTime::from_sql(type_, raw)?; + Ok(primitive.assume_utc()) + } + + accepts!(TIMESTAMP, TIMESTAMPTZ); +} + +impl ToSql for OffsetDateTime { + fn to_sql( + &self, + type_: &Type, + w: &mut BytesMut, + ) -> Result> { + let utc_datetime = self.to_offset(UtcOffset::UTC); + let date = utc_datetime.date(); + let time = utc_datetime.time(); + let primitive = PrimitiveDateTime::new(date, time); + primitive.to_sql(type_, w) + } + + accepts!(TIMESTAMP, TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Date { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let jd = types::date_from_sql(raw)?; + Ok(base().date() + Duration::days(i64::from(jd))) + } + + accepts!(DATE); +} + +impl ToSql for Date { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let jd = (*self - base().date()).whole_days(); + if jd > i64::from(i32::max_value()) || jd < i64::from(i32::min_value()) { + return Err("value too large to transmit".into()); + } + + types::date_to_sql(jd as i32, w); + Ok(IsNull::No) + } + + accepts!(DATE); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Time { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let usec = types::time_from_sql(raw)?; + Ok(time!(00:00:00) + Duration::microseconds(usec)) + } + + accepts!(TIME); +} + +impl ToSql for Time { + 
fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let delta = *self - time!(00:00:00); + let time = match i64::try_from(delta.whole_microseconds()) { + Ok(time) => time, + Err(_) => return Err("value too large to transmit".into()), + }; + types::time_to_sql(time, w); + Ok(IsNull::No) + } + + accepts!(TIME); + to_sql_checked!(); +} diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 6ba57b113..9bfdd9a34 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -27,6 +27,7 @@ with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] with-geo-types-0_4 = ["tokio-postgres/with-geo-types-0_4"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] +with-time-0_2 = ["tokio-postgres/with-time-0_2"] [dependencies] bytes = "0.5" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 2f3dd40c3..f3f8df734 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -33,6 +33,7 @@ with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] with-geo-types-0_4 = ["postgres-types/with-geo-types-0_4"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] +with-time-0_2 = ["postgres-types/with-time-0_2"] [dependencies] async-trait = "0.1" From 25db147e874a9fde450c0215d8769e679d1cca18 Mon Sep 17 00:00:00 2001 From: Aaron Loucks Date: Fri, 28 Feb 2020 19:03:59 -0500 Subject: [PATCH 386/819] Port chrono tests to time tests --- tokio-postgres/Cargo.toml | 1 + tokio-postgres/tests/test/types/mod.rs | 2 + tokio-postgres/tests/test/types/time_02.rs | 147 +++++++++++++++++++++ 3 files changed, 150 insertions(+) create mode 100644 tokio-postgres/tests/test/types/time_02.rs diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f3f8df734..f3bf3338d 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -63,4 +63,5 @@ geo-types-04 = { version = "0.4", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } uuid-08 = { version = "0.8", package = "uuid" } +time-02 = { version = "0.2", package = "time" } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index df6a2e934..5d292db52 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -22,6 +22,8 @@ mod eui48_04; mod geo_010; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; +#[cfg(feature = "with-time-0_2")] +mod time_02; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; diff --git a/tokio-postgres/tests/test/types/time_02.rs b/tokio-postgres/tests/test/types/time_02.rs new file mode 100644 index 000000000..8690788bd --- /dev/null +++ b/tokio-postgres/tests/test/types/time_02.rs @@ -0,0 +1,147 @@ +use time_02::{OffsetDateTime, PrimitiveDateTime}; +use tokio_postgres::types::{Date, Timestamp}; + +use crate::types::test_type; + +#[tokio::test] +async fn test_naive_date_time_params() { + fn make_check(time: &str) -> (Option, &str) { + ( + Some(PrimitiveDateTime::parse(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), + time, + ) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_naive_date_time_params() { + fn make_check(time: &str) -> (Timestamp, &str) { + ( + Timestamp::Value(PrimitiveDateTime::parse(time, "'%Y-%m-%d 
%H:%M:%S.%f'").unwrap()), + time, + ) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_date_time_params() { + fn make_check(time: &str) -> (Option, &str) { + ( + Some(OffsetDateTime::parse(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), + time, + ) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_date_time_params() { + fn make_check(time: &str) -> (Timestamp, &str) { + ( + Timestamp::Value(OffsetDateTime::parse(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), + time, + ) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_date_params() { + fn make_check(time: &str) -> (Option, &str) { + ( + Some(time_02::Date::parse(time, "'%Y-%m-%d'").unwrap()), + time, + ) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_date_params() { + fn make_check(date: &str) -> (Date, &str) { + ( + Date::Value(time_02::Date::parse(date, "'%Y-%m-%d'").unwrap()), + date, + ) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (Date::PosInfinity, "'infinity'"), + (Date::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_time_params() { + fn make_check(time: &str) -> (Option, &str) { + ( + Some(time_02::Time::parse(time, "'%H:%M:%S.%f'").unwrap()), + time, + ) + } + test_type( + "TIME", + &[ + make_check("'00:00:00.010000000'"), + make_check("'11:19:33.100314000'"), + make_check("'23:11:45.120200000'"), + (None, "NULL"), + ], + ) + .await; +} From f33b145cd663d1abb60101344835acff48e07234 Mon Sep 17 00:00:00 2001 From: Aaron Loucks Date: Fri, 28 Feb 2020 19:21:14 -0500 Subject: [PATCH 387/819] Remove fractional seconds from time_02 tests --- tokio-postgres/tests/test/types/time_02.rs | 51 ++++++++++++---------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/tokio-postgres/tests/test/types/time_02.rs b/tokio-postgres/tests/test/types/time_02.rs index 8690788bd..2b2040212 100644 --- a/tokio-postgres/tests/test/types/time_02.rs +++ b/tokio-postgres/tests/test/types/time_02.rs @@ -3,20 +3,23 @@ use tokio_postgres::types::{Date, Timestamp}; use crate::types::test_type; +// time 0.2 does not [yet?] 
support parsing fractional seconds +// https://github.com/time-rs/time/issues/226 + #[tokio::test] -async fn test_naive_date_time_params() { +async fn test_primitive_date_time_params() { fn make_check(time: &str) -> (Option, &str) { ( - Some(PrimitiveDateTime::parse(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), + Some(PrimitiveDateTime::parse(time, "'%Y-%m-%d %H:%M:%S'").unwrap()), time, ) } test_type( "TIMESTAMP", &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), + make_check("'1970-01-01 00:00:00'"), // .010000000 + make_check("'1965-09-25 11:19:33'"), // .100314000 + make_check("'2010-02-09 23:11:45'"), // .120200000 (None, "NULL"), ], ) @@ -24,19 +27,19 @@ async fn test_naive_date_time_params() { } #[tokio::test] -async fn test_with_special_naive_date_time_params() { +async fn test_with_special_primitive_date_time_params() { fn make_check(time: &str) -> (Timestamp, &str) { ( - Timestamp::Value(PrimitiveDateTime::parse(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), + Timestamp::Value(PrimitiveDateTime::parse(time, "'%Y-%m-%d %H:%M:%S'").unwrap()), time, ) } test_type( "TIMESTAMP", &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), + make_check("'1970-01-01 00:00:00'"), // .010000000 + make_check("'1965-09-25 11:19:33'"), // .100314000 + make_check("'2010-02-09 23:11:45'"), // .120200000 (Timestamp::PosInfinity, "'infinity'"), (Timestamp::NegInfinity, "'-infinity'"), ], @@ -45,19 +48,19 @@ async fn test_with_special_naive_date_time_params() { } #[tokio::test] -async fn test_date_time_params() { +async fn test_offset_date_time_params() { fn make_check(time: &str) -> (Option, &str) { ( - Some(OffsetDateTime::parse(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), + Some(OffsetDateTime::parse(time, "'%Y-%m-%d %H:%M:%S %z'").unwrap()), time, ) } test_type( "TIMESTAMP WITH TIME ZONE", &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), + make_check("'1970-01-01 00:00:00 +0000'"), // .010000000 + make_check("'1965-09-25 11:19:33 +0000'"), // .100314000 + make_check("'2010-02-09 23:11:45 +0000'"), // .120200000 (None, "NULL"), ], ) @@ -65,19 +68,19 @@ async fn test_date_time_params() { } #[tokio::test] -async fn test_with_special_date_time_params() { +async fn test_with_special_offset_date_time_params() { fn make_check(time: &str) -> (Timestamp, &str) { ( - Timestamp::Value(OffsetDateTime::parse(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap()), + Timestamp::Value(OffsetDateTime::parse(time, "'%Y-%m-%d %H:%M:%S %z'").unwrap()), time, ) } test_type( "TIMESTAMP WITH TIME ZONE", &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), + make_check("'1970-01-01 00:00:00 +0000'"), // .010000000 + make_check("'1965-09-25 11:19:33 +0000'"), // .100314000 + make_check("'2010-02-09 23:11:45 +0000'"), // .120200000 (Timestamp::PosInfinity, "'infinity'"), (Timestamp::NegInfinity, "'-infinity'"), ], @@ -130,16 +133,16 @@ async fn test_with_special_date_params() { async fn test_time_params() { fn make_check(time: &str) -> (Option, &str) { ( - Some(time_02::Time::parse(time, "'%H:%M:%S.%f'").unwrap()), + Some(time_02::Time::parse(time, "'%H:%M:%S'").unwrap()), time, ) } test_type( "TIME", &[ - make_check("'00:00:00.010000000'"), - 
make_check("'11:19:33.100314000'"), - make_check("'23:11:45.120200000'"), + make_check("'00:00:00'"), // .010000000 + make_check("'11:19:33'"), // .100314000 + make_check("'23:11:45'"), // .120200000 (None, "NULL"), ], ) From 6d8c403a8ddd98c3af12d79670fe6450cbb56d81 Mon Sep 17 00:00:00 2001 From: Aaron Loucks Date: Fri, 28 Feb 2020 19:52:20 -0500 Subject: [PATCH 388/819] OffsetDateTime now only supports TIMESTAMPZ --- postgres-types/src/lib.rs | 4 ++-- postgres-types/src/time_02.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 88ab298df..1479c1264 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -398,7 +398,7 @@ impl WrongType { /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | /// | `time::PrimitiveDateTime` | TIMESTAMP | -/// | `time::OffsetDateTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | +/// | `time::OffsetDateTime` | TIMESTAMP WITH TIME ZONE | /// | `time::Date` | DATE | /// | `time::Time` | TIME | /// | `eui48::MacAddress` | MACADDR | @@ -661,7 +661,7 @@ pub enum IsNull { /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | /// | `time::PrimitiveDateTime` | TIMESTAMP | -/// | `time::OffsetDateTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | +/// | `time::OffsetDateTime` | TIMESTAMP WITH TIME ZONE | /// | `time::Date` | DATE | /// | `time::Time` | TIME | /// | `eui48::MacAddress` | MACADDR | diff --git a/postgres-types/src/time_02.rs b/postgres-types/src/time_02.rs index ce80267ce..19a8909e7 100644 --- a/postgres-types/src/time_02.rs +++ b/postgres-types/src/time_02.rs @@ -40,7 +40,7 @@ impl<'a> FromSql<'a> for OffsetDateTime { Ok(primitive.assume_utc()) } - accepts!(TIMESTAMP, TIMESTAMPTZ); + accepts!(TIMESTAMPTZ); } impl ToSql for OffsetDateTime { @@ -56,7 +56,7 @@ impl ToSql for OffsetDateTime { primitive.to_sql(type_, w) } - accepts!(TIMESTAMP, TIMESTAMPTZ); + accepts!(TIMESTAMPTZ); to_sql_checked!(); } From 9f6d03d38be52f3f2faaa447a31092f5f43160e9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 5 Mar 2020 05:31:31 -0800 Subject: [PATCH 389/819] Update tokio-util --- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/codec.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f3bf3338d..0d399ae0a 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -49,7 +49,7 @@ phf = "0.8" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-types = { version = "0.1.0", path = "../postgres-types" } tokio = { version = "0.2", features = ["io-util"] } -tokio-util = { version = "0.2", features = ["codec"] } +tokio-util = { version = "0.3", features = ["codec"] } [dev-dependencies] tokio = { version = "0.2", features = ["full"] } diff --git a/tokio-postgres/src/codec.rs b/tokio-postgres/src/codec.rs index 2fae8bc17..9d078044b 100644 --- a/tokio-postgres/src/codec.rs +++ b/tokio-postgres/src/codec.rs @@ -37,8 +37,7 @@ impl FallibleIterator for BackendMessages { pub struct PostgresCodec; -impl Encoder for PostgresCodec { - type Item = FrontendMessage; +impl Encoder for PostgresCodec { type Error = io::Error; fn encode(&mut self, item: FrontendMessage, dst: &mut BytesMut) -> io::Result<()> { From e51028385bb9023dda03704a16a2309e30300316 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 5 Mar 2020 15:51:08 -0800 Subject: [PATCH 390/819] Log notices in Connection::poll impl --- tokio-postgres/src/connection.rs | 8 
++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index ac1867431..9c8e369f1 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -8,7 +8,7 @@ use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::stream::FusedStream; use futures::{ready, Sink, Stream, StreamExt}; -use log::trace; +use log::{info, trace}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::collections::{HashMap, VecDeque}; @@ -330,7 +330,11 @@ where type Output = Result<(), Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - while let Some(_) = ready!(self.poll_message(cx)?) {} + while let Some(message) = ready!(self.poll_message(cx)?) { + if let AsyncMessage::Notice(notice) = message { + info!("{}: {}", notice.severity(), notice.message()); + } + } Poll::Ready(Ok(())) } } From d8470a75a5844250ef7815e9699c7662533b6dbe Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 5 Mar 2020 16:00:49 -0800 Subject: [PATCH 391/819] Release postgres-types v0.1.1 --- postgres-types/CHANGELOG.md | 6 ++++++ postgres-types/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 1569598eb..f12c5c7f6 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.1.1 - 2020-03-05 + +### Added + +* Added support for `time` 0.2. + ## v0.1.0 - 2019-12-23 ### Changed diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index a17829cec..a12f1f513 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.1.0" +version = "0.1.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 72e0dbfcc09774c0b10e1e37a8c3c923e01168f8 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 5 Mar 2020 17:11:46 -0800 Subject: [PATCH 392/819] Release tokio-postgres v0.5.3 --- tokio-postgres/CHANGELOG.md | 7 +++++++ tokio-postgres/Cargo.toml | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index bca25f817..e65f00f14 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,12 @@ # Change Log +## v0.5.3 - 2020-03-05 + +### Added + +* Added `Debug` implementations for `Client`, `Row`, and `Column`. +* Added `time` 0.2 support. 
+ ## v0.5.2 - 2020-01-31 ### Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 0d399ae0a..d1571c352 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.5.2" +version = "0.5.3" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -47,7 +47,7 @@ percent-encoding = "2.0" pin-project-lite = "0.1" phf = "0.8" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } -postgres-types = { version = "0.1.0", path = "../postgres-types" } +postgres-types = { version = "0.1.1", path = "../postgres-types" } tokio = { version = "0.2", features = ["io-util"] } tokio-util = { version = "0.3", features = ["codec"] } From ff36ce993d748da91cfa305eaf4b71512a31f2be Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 5 Mar 2020 17:13:32 -0800 Subject: [PATCH 393/819] Release postgres v0.17.2 --- postgres/CHANGELOG.md | 7 +++++++ postgres/Cargo.toml | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 988578d46..550f0c706 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,12 @@ # Change Log +## v0.17.2 - 2020-03-05 + +### Added + +* Added `Debug` implementations for `Client`, `Row`, and `Column`. +* Added `time` 0.2 support. + ## v0.17.1 - 2020-01-31 ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 9bfdd9a34..d23715cde 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.17.1" +version = "0.17.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -33,7 +33,7 @@ with-time-0_2 = ["tokio-postgres/with-time-0_2"] bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.5.2", path = "../tokio-postgres" } +tokio-postgres = { version = "0.5.3", path = "../tokio-postgres" } tokio = { version = "0.2", features = ["rt-core"] } log = "0.4" From 0fb9ede350f0346f7e1a45af92aa53193da7437b Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 9 Mar 2020 13:24:28 +0000 Subject: [PATCH 394/819] Update base64 requirement from 0.11 to 0.12 Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version. - [Release notes](https://github.com/marshallpierce/rust-base64/releases) - [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md) - [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.11.0...v0.12.0) Signed-off-by: dependabot-preview[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 4ea70f0ae..8fbd0093c 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" [dependencies] -base64 = "0.11" +base64 = "0.12" byteorder = "1.0" bytes = "0.5" fallible-iterator = "0.2" From b10f964a1b95412c980f157a3df39b2d4fbbdc87 Mon Sep 17 00:00:00 2001 From: "Michael P. 
Jung" Date: Mon, 16 Mar 2020 13:56:41 +0100 Subject: [PATCH 395/819] Add documentation for crate features This closes #544 --- postgres/src/lib.rs | 14 ++++++++++++++ tokio-postgres/src/lib.rs | 15 +++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index ef669f7dc..2b2dcec38 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -46,6 +46,20 @@ //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the //! `postgres-openssl` and `postgres-native-tls` crates provide implementations backed by the `openssl` and `native-tls` //! crates, respectively. +//! +//! # Features +//! +//! The following features can be enabled from `Cargo.toml`: +//! +//! | Feature | Description | Extra dependencies | Default | +//! | ------- | ----------- | ------------------ | ------- | +//! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | +//! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | +//! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-geo-types-0_4` | Enable support for the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types) 0.4 | no | +//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [eui48](https://crates.io/crates/serde_json) 1.0 | no | +//! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | +//! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | #![doc(html_root_url = "https://docs.rs/postgres/0.17")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 0d8aa8436..2845fcf61 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -96,6 +96,21 @@ //! as an argument. The `NoTls` type in this crate can be used when TLS is not required. Otherwise, the //! `postgres-openssl` and `postgres-native-tls` crates provide implementations backed by the `openssl` and `native-tls` //! crates, respectively. +//! +//! # Features +//! +//! The following features can be enabled from `Cargo.toml`: +//! +//! | Feature | Description | Extra dependencies | Default | +//! | ------- | ----------- | ------------------ | ------- | +//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 0.2 with the features `dns`, `net` and `time` | yes | +//! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | +//! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | +//! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-geo-types-0_4` | Enable support for the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types) 0.4 | no | +//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [eui48](https://crates.io/crates/serde_json) 1.0 | no | +//! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | +//! | `with-time-0_2` | Enable support for the `time` crate. 
| [time](https://crates.io/crates/time) 0.2 | no | #![doc(html_root_url = "https://docs.rs/tokio-postgres/0.5")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] From e25987656eea9deed0554489b9fd596ccf699dc0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 17 Mar 2020 15:36:22 -0700 Subject: [PATCH 396/819] Release postgres-protocol v0.5.1 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 375465b22..b099c1ffa 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.1 - 2020-03-17 + +### Changed + +* Upgraded `base64` to 0.12. + ## v0.5.0 - 2019-12-23 ### Changed diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 8fbd0093c..ebad1aefc 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.5.0" +version = "0.5.1" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" From fd3a99c225a2ee1f1eed7940b5c20a6496ca39e0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 22 Mar 2020 12:05:00 -0700 Subject: [PATCH 397/819] Don't spawn off connection in blocking impl We can now directly return fatal errors, and intercept notifications --- postgres/src/binary_copy.rs | 22 +++--- postgres/src/client.rs | 72 ++++++------------- postgres/src/config.rs | 14 +--- postgres/src/connection.rs | 106 ++++++++++++++++++++++++++++ postgres/src/copy_in_writer.rs | 12 ++-- postgres/src/copy_out_reader.rs | 20 +++--- postgres/src/lib.rs | 5 +- postgres/src/row_iter.rs | 17 ++--- postgres/src/transaction.rs | 68 +++++++++--------- postgres/src/transaction_builder.rs | 17 +++-- tokio-postgres/tests/test/main.rs | 8 +-- 11 files changed, 219 insertions(+), 142 deletions(-) create mode 100644 postgres/src/connection.rs diff --git a/postgres/src/binary_copy.rs b/postgres/src/binary_copy.rs index 7828cb599..259347195 100644 --- a/postgres/src/binary_copy.rs +++ b/postgres/src/binary_copy.rs @@ -1,7 +1,8 @@ //! Utilities for working with the PostgreSQL binary copy format. +use crate::connection::ConnectionRef; use crate::types::{ToSql, Type}; -use crate::{CopyInWriter, CopyOutReader, Error, Rt}; +use crate::{CopyInWriter, CopyOutReader, Error}; use fallible_iterator::FallibleIterator; use futures::StreamExt; use std::pin::Pin; @@ -13,7 +14,7 @@ use tokio_postgres::binary_copy::{self, BinaryCopyOutStream}; /// /// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. pub struct BinaryCopyInWriter<'a> { - runtime: Rt<'a>, + connection: ConnectionRef<'a>, sink: Pin>, } @@ -26,7 +27,7 @@ impl<'a> BinaryCopyInWriter<'a> { .expect("writer has already been written to"); BinaryCopyInWriter { - runtime: writer.runtime, + connection: writer.connection, sink: Box::pin(binary_copy::BinaryCopyInWriter::new(stream, types)), } } @@ -37,7 +38,7 @@ impl<'a> BinaryCopyInWriter<'a> { /// /// Panics if the number of values provided does not match the number expected. pub fn write(&mut self, values: &[&(dyn ToSql + Sync)]) -> Result<(), Error> { - self.runtime.block_on(self.sink.as_mut().write(values)) + self.connection.block_on(self.sink.as_mut().write(values)) } /// A maximally-flexible version of `write`. 
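A rough usage sketch of the binary copy writer from the blocking API; the connection string, table, and column types are illustrative assumptions rather than part of this patch.

```rust
use postgres::binary_copy::BinaryCopyInWriter;
use postgres::types::Type;
use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
    client.batch_execute("CREATE TEMPORARY TABLE people (id INT, name TEXT)")?;

    // `copy_in` hands back the plain writer; the binary wrapper takes it over and is
    // driven through the borrowed connection on every call.
    let writer = client.copy_in("COPY people (id, name) FROM STDIN BINARY")?;
    let mut writer = BinaryCopyInWriter::new(writer, &[Type::INT4, Type::TEXT]);
    writer.write(&[&1i32, &"Alice"])?;
    writer.write(&[&2i32, &"Bob"])?;
    assert_eq!(writer.finish()?, 2);
    Ok(())
}
```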
@@ -50,20 +51,21 @@ impl<'a> BinaryCopyInWriter<'a> { I: IntoIterator, I::IntoIter: ExactSizeIterator, { - self.runtime.block_on(self.sink.as_mut().write_raw(values)) + self.connection + .block_on(self.sink.as_mut().write_raw(values)) } /// Completes the copy, returning the number of rows added. /// /// This method *must* be used to complete the copy process. If it is not, the copy will be aborted. pub fn finish(mut self) -> Result { - self.runtime.block_on(self.sink.as_mut().finish()) + self.connection.block_on(self.sink.as_mut().finish()) } } /// An iterator of rows deserialized from the PostgreSQL binary copy format. pub struct BinaryCopyOutIter<'a> { - runtime: Rt<'a>, + connection: ConnectionRef<'a>, stream: Pin>, } @@ -76,7 +78,7 @@ impl<'a> BinaryCopyOutIter<'a> { .expect("reader has already been read from"); BinaryCopyOutIter { - runtime: reader.runtime, + connection: reader.connection, stream: Box::pin(BinaryCopyOutStream::new(stream, types)), } } @@ -87,6 +89,8 @@ impl FallibleIterator for BinaryCopyOutIter<'_> { type Error = Error; fn next(&mut self) -> Result, Error> { - self.runtime.block_on(self.stream.next()).transpose() + let stream = &mut self.stream; + self.connection + .block_on(async { stream.next().await.transpose() }) } } diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 0a3a51e1b..3ae5f86cc 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,45 +1,21 @@ +use crate::connection::Connection; use crate::{ CancelToken, Config, CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction, TransactionBuilder, }; -use std::ops::{Deref, DerefMut}; -use tokio::runtime::Runtime; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage, Socket}; -pub(crate) struct Rt<'a>(pub &'a mut Runtime); - -// no-op impl to extend the borrow until drop -impl Drop for Rt<'_> { - fn drop(&mut self) {} -} - -impl Deref for Rt<'_> { - type Target = Runtime; - - #[inline] - fn deref(&self) -> &Runtime { - self.0 - } -} - -impl DerefMut for Rt<'_> { - #[inline] - fn deref_mut(&mut self) -> &mut Runtime { - self.0 - } -} - /// A synchronous PostgreSQL client. pub struct Client { - runtime: Runtime, + connection: Connection, client: tokio_postgres::Client, } impl Client { - pub(crate) fn new(runtime: Runtime, client: tokio_postgres::Client) -> Client { - Client { runtime, client } + pub(crate) fn new(connection: Connection, client: tokio_postgres::Client) -> Client { + Client { connection, client } } /// A convenience function which parses a configuration string into a `Config` and then connects to the database. @@ -62,10 +38,6 @@ impl Client { Config::new() } - fn rt(&mut self) -> Rt<'_> { - Rt(&mut self.runtime) - } - /// Executes a statement, returning the number of rows modified. /// /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list @@ -104,7 +76,7 @@ impl Client { where T: ?Sized + ToStatement, { - self.runtime.block_on(self.client.execute(query, params)) + self.connection.block_on(self.client.execute(query, params)) } /// Executes a statement, returning the resulting rows. @@ -140,7 +112,7 @@ impl Client { where T: ?Sized + ToStatement, { - self.runtime.block_on(self.client.query(query, params)) + self.connection.block_on(self.client.query(query, params)) } /// Executes a statement which returns a single row, returning it. 
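The read side mirrors that: `BinaryCopyOutIter` implements `FallibleIterator`, so rows are decoded lazily as the connection is polled. A sketch with the same illustrative table, assuming the `BinaryCopyOutRow` accessors from tokio-postgres:

```rust
use fallible_iterator::FallibleIterator;
use postgres::binary_copy::BinaryCopyOutIter;
use postgres::types::Type;
use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
    client.batch_execute(
        "CREATE TEMPORARY TABLE people (id INT, name TEXT);
         INSERT INTO people VALUES (1, 'Alice'), (2, 'Bob');",
    )?;

    // Each call to `next` drives the connection just far enough to decode one row.
    let reader = client.copy_out("COPY people (id, name) TO STDOUT BINARY")?;
    let mut rows = BinaryCopyOutIter::new(reader, &[Type::INT4, Type::TEXT]);
    while let Some(row) = rows.next()? {
        let id: i32 = row.get(0);
        let name: String = row.get(1);
        println!("{}: {}", id, name);
    }
    Ok(())
}
```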
@@ -177,7 +149,8 @@ impl Client { where T: ?Sized + ToStatement, { - self.runtime.block_on(self.client.query_one(query, params)) + self.connection + .block_on(self.client.query_one(query, params)) } /// Executes a statement which returns zero or one rows, returning it. @@ -223,7 +196,8 @@ impl Client { where T: ?Sized + ToStatement, { - self.runtime.block_on(self.client.query_opt(query, params)) + self.connection + .block_on(self.client.query_opt(query, params)) } /// A maximally-flexible version of `query`. @@ -289,9 +263,9 @@ impl Client { I::IntoIter: ExactSizeIterator, { let stream = self - .runtime + .connection .block_on(self.client.query_raw(query, params))?; - Ok(RowIter::new(self.rt(), stream)) + Ok(RowIter::new(self.connection.as_ref(), stream)) } /// Creates a new prepared statement. @@ -318,7 +292,7 @@ impl Client { /// # } /// ``` pub fn prepare(&mut self, query: &str) -> Result { - self.runtime.block_on(self.client.prepare(query)) + self.connection.block_on(self.client.prepare(query)) } /// Like `prepare`, but allows the types of query parameters to be explicitly specified. @@ -349,7 +323,7 @@ impl Client { /// # } /// ``` pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - self.runtime + self.connection .block_on(self.client.prepare_typed(query, types)) } @@ -380,8 +354,8 @@ impl Client { where T: ?Sized + ToStatement, { - let sink = self.runtime.block_on(self.client.copy_in(query))?; - Ok(CopyInWriter::new(self.rt(), sink)) + let sink = self.connection.block_on(self.client.copy_in(query))?; + Ok(CopyInWriter::new(self.connection.as_ref(), sink)) } /// Executes a `COPY TO STDOUT` statement, returning a reader of the resulting data. @@ -408,8 +382,8 @@ impl Client { where T: ?Sized + ToStatement, { - let stream = self.runtime.block_on(self.client.copy_out(query))?; - Ok(CopyOutReader::new(self.rt(), stream)) + let stream = self.connection.block_on(self.client.copy_out(query))?; + Ok(CopyOutReader::new(self.connection.as_ref(), stream)) } /// Executes a sequence of SQL statements using the simple query protocol. @@ -428,7 +402,7 @@ impl Client { /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass /// them to this method! pub fn simple_query(&mut self, query: &str) -> Result, Error> { - self.runtime.block_on(self.client.simple_query(query)) + self.connection.block_on(self.client.simple_query(query)) } /// Executes a sequence of SQL statements using the simple query protocol. @@ -442,7 +416,7 @@ impl Client { /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { - self.runtime.block_on(self.client.batch_execute(query)) + self.connection.block_on(self.client.batch_execute(query)) } /// Begins a new database transaction. @@ -466,8 +440,8 @@ impl Client { /// # } /// ``` pub fn transaction(&mut self) -> Result, Error> { - let transaction = self.runtime.block_on(self.client.transaction())?; - Ok(Transaction::new(&mut self.runtime, transaction)) + let transaction = self.connection.block_on(self.client.transaction())?; + Ok(Transaction::new(self.connection.as_ref(), transaction)) } /// Returns a builder for a transaction with custom settings. 
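`query_raw` is the lazy counterpart of `query`: it returns a `RowIter` that implements `FallibleIterator` and pulls rows on demand through the borrowed connection. A minimal sketch; the query is illustrative, and the empty iterator only satisfies the exact-size parameter bound.

```rust
use fallible_iterator::FallibleIterator;
use postgres::types::ToSql;
use postgres::{Client, NoTls};
use std::iter;

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;

    // No parameters for this query, so pass an empty (exact-size) iterator.
    let params = iter::empty::<&dyn ToSql>();
    let mut rows = client.query_raw("SELECT generate_series(1, 3)", params)?;
    while let Some(row) = rows.next()? {
        let n: i32 = row.get(0);
        println!("{}", n);
    }
    Ok(())
}
```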
@@ -494,7 +468,7 @@ impl Client { /// # } /// ``` pub fn build_transaction(&mut self) -> TransactionBuilder<'_> { - TransactionBuilder::new(&mut self.runtime, self.client.build_transaction()) + TransactionBuilder::new(self.connection.as_ref(), self.client.build_transaction()) } /// Constructs a cancellation token that can later be used to request diff --git a/postgres/src/config.rs b/postgres/src/config.rs index f6b151a8e..b344efdd2 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -2,9 +2,8 @@ //! //! Requires the `runtime` Cargo feature (enabled by default). +use crate::connection::Connection; use crate::Client; -use futures::FutureExt; -use log::error; use std::fmt; use std::path::Path; use std::str::FromStr; @@ -324,15 +323,8 @@ impl Config { let (client, connection) = runtime.block_on(self.config.connect(tls))?; - // FIXME don't spawn this so error reporting is less weird. - let connection = connection.map(|r| { - if let Err(e) = r { - error!("postgres connection error: {}", e) - } - }); - runtime.spawn(connection); - - Ok(Client::new(runtime, client)) + let connection = Connection::new(runtime, connection); + Ok(Client::new(connection, client)) } } diff --git a/postgres/src/connection.rs b/postgres/src/connection.rs new file mode 100644 index 000000000..440ad5daf --- /dev/null +++ b/postgres/src/connection.rs @@ -0,0 +1,106 @@ +use crate::{Error, Notification}; +use futures::future; +use futures::{pin_mut, Stream}; +use log::info; +use std::collections::VecDeque; +use std::future::Future; +use std::ops::{Deref, DerefMut}; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::runtime::Runtime; +use tokio_postgres::AsyncMessage; + +pub struct Connection { + runtime: Runtime, + connection: Pin> + Send>>, + notifications: VecDeque, +} + +impl Connection { + pub fn new(runtime: Runtime, connection: tokio_postgres::Connection) -> Connection + where + S: AsyncRead + AsyncWrite + Unpin + 'static + Send, + T: AsyncRead + AsyncWrite + Unpin + 'static + Send, + { + Connection { + runtime, + connection: Box::pin(ConnectionStream { connection }), + notifications: VecDeque::new(), + } + } + + pub fn as_ref(&mut self) -> ConnectionRef<'_> { + ConnectionRef { connection: self } + } + + pub fn block_on(&mut self, future: F) -> Result + where + F: Future>, + { + pin_mut!(future); + let connection = &mut self.connection; + let notifications = &mut self.notifications; + self.runtime.block_on({ + future::poll_fn(|cx| { + loop { + match connection.as_mut().poll_next(cx) { + Poll::Ready(Some(Ok(AsyncMessage::Notification(notification)))) => { + notifications.push_back(notification); + } + Poll::Ready(Some(Ok(AsyncMessage::Notice(notice)))) => { + info!("{}: {}", notice.severity(), notice.message()); + } + Poll::Ready(Some(Ok(_))) => {} + Poll::Ready(Some(Err(e))) => return Poll::Ready(Err(e)), + Poll::Ready(None) | Poll::Pending => break, + } + } + + future.as_mut().poll(cx) + }) + }) + } +} + +pub struct ConnectionRef<'a> { + connection: &'a mut Connection, +} + +// no-op impl to extend the borrow until drop +impl Drop for ConnectionRef<'_> { + #[inline] + fn drop(&mut self) {} +} + +impl Deref for ConnectionRef<'_> { + type Target = Connection; + + #[inline] + fn deref(&self) -> &Connection { + self.connection + } +} + +impl DerefMut for ConnectionRef<'_> { + #[inline] + fn deref_mut(&mut self) -> &mut Connection { + self.connection + } +} + +struct ConnectionStream { + connection: tokio_postgres::Connection, +} + +impl Stream for 
ConnectionStream +where + S: AsyncRead + AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.connection.poll_message(cx) + } +} diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs index fc11818ab..c996ed857 100644 --- a/postgres/src/copy_in_writer.rs +++ b/postgres/src/copy_in_writer.rs @@ -1,5 +1,5 @@ +use crate::connection::ConnectionRef; use crate::lazy_pin::LazyPin; -use crate::Rt; use bytes::{Bytes, BytesMut}; use futures::SinkExt; use std::io; @@ -10,15 +10,15 @@ use tokio_postgres::{CopyInSink, Error}; /// /// The copy *must* be explicitly completed via the `finish` method. If it is not, the copy will be aborted. pub struct CopyInWriter<'a> { - pub(crate) runtime: Rt<'a>, + pub(crate) connection: ConnectionRef<'a>, pub(crate) sink: LazyPin>, buf: BytesMut, } impl<'a> CopyInWriter<'a> { - pub(crate) fn new(runtime: Rt<'a>, sink: CopyInSink) -> CopyInWriter<'a> { + pub(crate) fn new(connection: ConnectionRef<'a>, sink: CopyInSink) -> CopyInWriter<'a> { CopyInWriter { - runtime, + connection, sink: LazyPin::new(sink), buf: BytesMut::new(), } @@ -29,7 +29,7 @@ impl<'a> CopyInWriter<'a> { /// If this is not called, the copy will be aborted. pub fn finish(mut self) -> Result { self.flush_inner()?; - self.runtime.block_on(self.sink.pinned().finish()) + self.connection.block_on(self.sink.pinned().finish()) } fn flush_inner(&mut self) -> Result<(), Error> { @@ -37,7 +37,7 @@ impl<'a> CopyInWriter<'a> { return Ok(()); } - self.runtime + self.connection .block_on(self.sink.pinned().send(self.buf.split().freeze())) } } diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index 9091e2200..a205d1a1a 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -1,5 +1,5 @@ +use crate::connection::ConnectionRef; use crate::lazy_pin::LazyPin; -use crate::Rt; use bytes::{Buf, Bytes}; use futures::StreamExt; use std::io::{self, BufRead, Read}; @@ -7,15 +7,15 @@ use tokio_postgres::CopyOutStream; /// The reader returned by the `copy_out` method. 
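The heart of this refactor is that every blocking call now drives the connection itself instead of relying on a spawned task: `block_on` wraps the caller's future in a `poll_fn` that first pumps the connection stream (stashing notifications and logging notices) and only then polls the caller's future. Stripped of the postgres specifics, the pattern looks roughly like this; the function name, the executor, and the dummy futures are illustrative only, and error handling plus the notification queue are omitted.

```rust
use futures::future::{self, FutureExt};
use futures::pin_mut;
use std::future::Future;

/// Drive a background future alongside a foreground one; only the foreground
/// future's completion finishes the call.
fn block_on_with_background<B, F, T>(background: B, foreground: F) -> T
where
    B: Future<Output = ()>,
    F: Future<Output = T>,
{
    // `fuse` makes it safe to keep polling the background future after it finishes.
    let background = background.fuse();
    pin_mut!(background, foreground);
    futures::executor::block_on(future::poll_fn(|cx| {
        // Let the background task make progress first...
        let _ = background.as_mut().poll(cx);
        // ...then poll the caller's future; its readiness alone decides the result.
        foreground.as_mut().poll(cx)
    }))
}

fn main() {
    assert_eq!(block_on_with_background(async {}, async { 1 + 1 }), 2);
}
```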
pub struct CopyOutReader<'a> { - pub(crate) runtime: Rt<'a>, + pub(crate) connection: ConnectionRef<'a>, pub(crate) stream: LazyPin, cur: Bytes, } impl<'a> CopyOutReader<'a> { - pub(crate) fn new(runtime: Rt<'a>, stream: CopyOutStream) -> CopyOutReader<'a> { + pub(crate) fn new(connection: ConnectionRef<'a>, stream: CopyOutStream) -> CopyOutReader<'a> { CopyOutReader { - runtime, + connection, stream: LazyPin::new(stream), cur: Bytes::new(), } @@ -35,10 +35,14 @@ impl Read for CopyOutReader<'_> { impl BufRead for CopyOutReader<'_> { fn fill_buf(&mut self) -> io::Result<&[u8]> { if !self.cur.has_remaining() { - match self.runtime.block_on(self.stream.pinned().next()) { - Some(Ok(cur)) => self.cur = cur, - Some(Err(e)) => return Err(io::Error::new(io::ErrorKind::Other, e)), - None => {} + let mut stream = self.stream.pinned(); + match self + .connection + .block_on({ async { stream.next().await.transpose() } }) + { + Ok(Some(cur)) => self.cur = cur, + Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), + Ok(None) => {} }; } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 2b2dcec38..78b318b16 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -65,8 +65,8 @@ pub use fallible_iterator; pub use tokio_postgres::{ - error, row, tls, types, Column, IsolationLevel, Portal, SimpleQueryMessage, Socket, Statement, - ToStatement, + error, row, tls, types, Column, IsolationLevel, Notification, Portal, SimpleQueryMessage, + Socket, Statement, ToStatement, }; pub use crate::cancel_token::CancelToken; @@ -89,6 +89,7 @@ pub mod binary_copy; mod cancel_token; mod client; pub mod config; +mod connection; mod copy_in_writer; mod copy_out_reader; mod generic_client; diff --git a/postgres/src/row_iter.rs b/postgres/src/row_iter.rs index 4be5f3477..3cd41b900 100644 --- a/postgres/src/row_iter.rs +++ b/postgres/src/row_iter.rs @@ -1,4 +1,4 @@ -use crate::Rt; +use crate::connection::ConnectionRef; use fallible_iterator::FallibleIterator; use futures::StreamExt; use std::pin::Pin; @@ -6,19 +6,14 @@ use tokio_postgres::{Error, Row, RowStream}; /// The iterator returned by `query_raw`. pub struct RowIter<'a> { - runtime: Rt<'a>, + connection: ConnectionRef<'a>, it: Pin>, } -// no-op impl to extend the borrow until drop -impl Drop for RowIter<'_> { - fn drop(&mut self) {} -} - impl<'a> RowIter<'a> { - pub(crate) fn new(runtime: Rt<'a>, stream: RowStream) -> RowIter<'a> { + pub(crate) fn new(connection: ConnectionRef<'a>, stream: RowStream) -> RowIter<'a> { RowIter { - runtime, + connection, it: Box::pin(stream), } } @@ -29,6 +24,8 @@ impl FallibleIterator for RowIter<'_> { type Error = Error; fn next(&mut self) -> Result, Error> { - self.runtime.block_on(self.it.next()).transpose() + let it = &mut self.it; + self.connection + .block_on(async { it.next().await.transpose() }) } } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index e5b3682f0..25bfff578 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,7 +1,5 @@ -use crate::{ - CancelToken, CopyInWriter, CopyOutReader, Portal, RowIter, Rt, Statement, ToStatement, -}; -use tokio::runtime::Runtime; +use crate::connection::ConnectionRef; +use crate::{CancelToken, CopyInWriter, CopyOutReader, Portal, RowIter, Statement, ToStatement}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; @@ -10,45 +8,41 @@ use tokio_postgres::{Error, Row, SimpleQueryMessage}; /// Transactions will implicitly roll back by default when dropped. 
Use the `commit` method to commit the changes made /// in the transaction. Transactions can be nested, with inner transactions implemented via savepoints. pub struct Transaction<'a> { - runtime: &'a mut Runtime, + connection: ConnectionRef<'a>, transaction: tokio_postgres::Transaction<'a>, } impl<'a> Transaction<'a> { pub(crate) fn new( - runtime: &'a mut Runtime, + connection: ConnectionRef<'a>, transaction: tokio_postgres::Transaction<'a>, ) -> Transaction<'a> { Transaction { - runtime, + connection, transaction, } } - fn rt(&mut self) -> Rt<'_> { - Rt(self.runtime) - } - /// Consumes the transaction, committing all changes made within it. - pub fn commit(self) -> Result<(), Error> { - self.runtime.block_on(self.transaction.commit()) + pub fn commit(mut self) -> Result<(), Error> { + self.connection.block_on(self.transaction.commit()) } /// Rolls the transaction back, discarding all changes made within it. /// /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. - pub fn rollback(self) -> Result<(), Error> { - self.runtime.block_on(self.transaction.rollback()) + pub fn rollback(mut self) -> Result<(), Error> { + self.connection.block_on(self.transaction.rollback()) } /// Like `Client::prepare`. pub fn prepare(&mut self, query: &str) -> Result { - self.runtime.block_on(self.transaction.prepare(query)) + self.connection.block_on(self.transaction.prepare(query)) } /// Like `Client::prepare_typed`. pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - self.runtime + self.connection .block_on(self.transaction.prepare_typed(query, types)) } @@ -57,7 +51,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.runtime + self.connection .block_on(self.transaction.execute(query, params)) } @@ -66,7 +60,8 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.runtime.block_on(self.transaction.query(query, params)) + self.connection + .block_on(self.transaction.query(query, params)) } /// Like `Client::query_one`. @@ -74,7 +69,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.runtime + self.connection .block_on(self.transaction.query_one(query, params)) } @@ -87,7 +82,7 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.runtime + self.connection .block_on(self.transaction.query_opt(query, params)) } @@ -99,9 +94,9 @@ impl<'a> Transaction<'a> { I::IntoIter: ExactSizeIterator, { let stream = self - .runtime + .connection .block_on(self.transaction.query_raw(query, params))?; - Ok(RowIter::new(self.rt(), stream)) + Ok(RowIter::new(self.connection.as_ref(), stream)) } /// Binds parameters to a statement, creating a "portal". @@ -118,7 +113,8 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - self.runtime.block_on(self.transaction.bind(query, params)) + self.connection + .block_on(self.transaction.bind(query, params)) } /// Continues execution of a portal, returning the next set of rows. @@ -126,7 +122,7 @@ impl<'a> Transaction<'a> { /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// `query_portal`. If the requested number is negative or 0, all remaining rows will be returned. 
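Portals only exist inside a transaction, which is why `bind` and `query_portal` live on `Transaction`. A rough sketch of paging through a result set a few rows at a time; the table contents and page size are illustrative assumptions.

```rust
use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
    client.batch_execute(
        "CREATE TEMPORARY TABLE items (id INT);
         INSERT INTO items SELECT * FROM generate_series(1, 10);",
    )?;

    let mut transaction = client.transaction()?;
    let portal = transaction.bind("SELECT id FROM items ORDER BY id", &[])?;
    loop {
        // Each call pulls at most two more rows from the portal.
        let rows = transaction.query_portal(&portal, 2)?;
        if rows.is_empty() {
            break;
        }
        for row in &rows {
            let id: i32 = row.get(0);
            println!("{}", id);
        }
    }
    transaction.commit()?;
    Ok(())
}
```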
pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { - self.runtime + self.connection .block_on(self.transaction.query_portal(portal, max_rows)) } @@ -137,9 +133,9 @@ impl<'a> Transaction<'a> { max_rows: i32, ) -> Result, Error> { let stream = self - .runtime + .connection .block_on(self.transaction.query_portal_raw(portal, max_rows))?; - Ok(RowIter::new(self.rt(), stream)) + Ok(RowIter::new(self.connection.as_ref(), stream)) } /// Like `Client::copy_in`. @@ -147,8 +143,8 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let sink = self.runtime.block_on(self.transaction.copy_in(query))?; - Ok(CopyInWriter::new(self.rt(), sink)) + let sink = self.connection.block_on(self.transaction.copy_in(query))?; + Ok(CopyInWriter::new(self.connection.as_ref(), sink)) } /// Like `Client::copy_out`. @@ -156,18 +152,20 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let stream = self.runtime.block_on(self.transaction.copy_out(query))?; - Ok(CopyOutReader::new(self.rt(), stream)) + let stream = self.connection.block_on(self.transaction.copy_out(query))?; + Ok(CopyOutReader::new(self.connection.as_ref(), stream)) } /// Like `Client::simple_query`. pub fn simple_query(&mut self, query: &str) -> Result, Error> { - self.runtime.block_on(self.transaction.simple_query(query)) + self.connection + .block_on(self.transaction.simple_query(query)) } /// Like `Client::batch_execute`. pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { - self.runtime.block_on(self.transaction.batch_execute(query)) + self.connection + .block_on(self.transaction.batch_execute(query)) } /// Like `Client::cancel_token`. @@ -177,9 +175,9 @@ impl<'a> Transaction<'a> { /// Like `Client::transaction`. pub fn transaction(&mut self) -> Result, Error> { - let transaction = self.runtime.block_on(self.transaction.transaction())?; + let transaction = self.connection.block_on(self.transaction.transaction())?; Ok(Transaction { - runtime: self.runtime, + connection: self.connection.as_ref(), transaction, }) } diff --git a/postgres/src/transaction_builder.rs b/postgres/src/transaction_builder.rs index d87d1a128..e0f8a56e8 100644 --- a/postgres/src/transaction_builder.rs +++ b/postgres/src/transaction_builder.rs @@ -1,18 +1,21 @@ +use crate::connection::ConnectionRef; use crate::{Error, IsolationLevel, Transaction}; -use tokio::runtime::Runtime; /// A builder for database transactions. pub struct TransactionBuilder<'a> { - runtime: &'a mut Runtime, + connection: ConnectionRef<'a>, builder: tokio_postgres::TransactionBuilder<'a>, } impl<'a> TransactionBuilder<'a> { pub(crate) fn new( - runtime: &'a mut Runtime, + connection: ConnectionRef<'a>, builder: tokio_postgres::TransactionBuilder<'a>, ) -> TransactionBuilder<'a> { - TransactionBuilder { runtime, builder } + TransactionBuilder { + connection, + builder, + } } /// Sets the isolation level of the transaction. @@ -40,8 +43,8 @@ impl<'a> TransactionBuilder<'a> { /// Begins the transaction. /// /// The transaction will roll back by default - use the `commit` method to commit it. 
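For transactions that need non-default characteristics, the builder path wraps `tokio_postgres::TransactionBuilder` in the same way; the isolation level below is just an example.

```rust
use postgres::{Client, IsolationLevel, NoTls};

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;

    let mut transaction = client
        .build_transaction()
        .isolation_level(IsolationLevel::Serializable)
        .start()?;
    transaction.batch_execute("CREATE TEMPORARY TABLE counters (n INT)")?;
    transaction.commit()?;
    Ok(())
}
```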
- pub fn start(self) -> Result, Error> { - let transaction = self.runtime.block_on(self.builder.start())?; - Ok(Transaction::new(self.runtime, transaction)) + pub fn start(mut self) -> Result, Error> { + let transaction = self.connection.block_on(self.builder.start())?; + Ok(Transaction::new(self.connection, transaction)) } } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 738601159..b01037edc 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -559,11 +559,9 @@ async fn copy_out() { .copy_out(&stmt) .await .unwrap() - .try_fold(BytesMut::new(), |mut buf, chunk| { - async move { - buf.extend_from_slice(&chunk); - Ok(buf) - } + .try_fold(BytesMut::new(), |mut buf, chunk| async move { + buf.extend_from_slice(&chunk); + Ok(buf) }) .await .unwrap(); From 3c4a0af6ffa8f4bc7aefdbf0ef133304d6bf1b01 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 22 Mar 2020 15:22:07 -0700 Subject: [PATCH 398/819] Add a notification API to the blocking client This mirrors the implementation in the old 0.15 release, but is quite a bit simpler now that we're built on the nonblocking API! --- postgres/Cargo.toml | 2 +- postgres/src/client.rs | 13 ++- postgres/src/connection.rs | 31 ++++++- postgres/src/lib.rs | 3 + postgres/src/notifications.rs | 161 ++++++++++++++++++++++++++++++++++ postgres/src/test.rs | 90 +++++++++++++++++++ 6 files changed, 292 insertions(+), 8 deletions(-) create mode 100644 postgres/src/notifications.rs diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index d23715cde..d0cf11004 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -35,7 +35,7 @@ fallible-iterator = "0.2" futures = "0.3" tokio-postgres = { version = "0.5.3", path = "../tokio-postgres" } -tokio = { version = "0.2", features = ["rt-core"] } +tokio = { version = "0.2", features = ["rt-core", "time"] } log = "0.4" [dev-dependencies] diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 3ae5f86cc..a0c61b33d 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -1,7 +1,7 @@ use crate::connection::Connection; use crate::{ - CancelToken, Config, CopyInWriter, CopyOutReader, RowIter, Statement, ToStatement, Transaction, - TransactionBuilder, + CancelToken, Config, CopyInWriter, CopyOutReader, Notifications, RowIter, Statement, + ToStatement, Transaction, TransactionBuilder, }; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; @@ -471,6 +471,13 @@ impl Client { TransactionBuilder::new(self.connection.as_ref(), self.client.build_transaction()) } + /// Returns a structure providing access to asynchronous notifications. + /// + /// Use the `LISTEN` command to register this connection for notifications. + pub fn notifications(&mut self) -> Notifications<'_> { + Notifications::new(self.connection.as_ref()) + } + /// Constructs a cancellation token that can later be used to request /// cancellation of a query running on this connection. /// @@ -490,7 +497,7 @@ impl Client { /// thread::spawn(move || { /// // Abort the query after 5s. 
/// thread::sleep(Duration::from_secs(5)); - /// cancel_token.cancel_query(NoTls); + /// let _ = cancel_token.cancel_query(NoTls); /// }); /// /// match client.simple_query("SELECT long_running_query()") { diff --git a/postgres/src/connection.rs b/postgres/src/connection.rs index 440ad5daf..acea5eca7 100644 --- a/postgres/src/connection.rs +++ b/postgres/src/connection.rs @@ -34,16 +34,30 @@ impl Connection { ConnectionRef { connection: self } } + pub fn enter(&self, f: F) -> T + where + F: FnOnce() -> T, + { + self.runtime.enter(f) + } + pub fn block_on(&mut self, future: F) -> Result where F: Future>, { pin_mut!(future); + self.poll_block_on(|cx, _, _| future.as_mut().poll(cx)) + } + + pub fn poll_block_on(&mut self, mut f: F) -> Result + where + F: FnMut(&mut Context<'_>, &mut VecDeque, bool) -> Poll>, + { let connection = &mut self.connection; let notifications = &mut self.notifications; self.runtime.block_on({ future::poll_fn(|cx| { - loop { + let done = loop { match connection.as_mut().poll_next(cx) { Poll::Ready(Some(Ok(AsyncMessage::Notification(notification)))) => { notifications.push_back(notification); @@ -53,14 +67,23 @@ impl Connection { } Poll::Ready(Some(Ok(_))) => {} Poll::Ready(Some(Err(e))) => return Poll::Ready(Err(e)), - Poll::Ready(None) | Poll::Pending => break, + Poll::Ready(None) => break true, + Poll::Pending => break false, } - } + }; - future.as_mut().poll(cx) + f(cx, notifications, done) }) }) } + + pub fn notifications(&self) -> &VecDeque { + &self.notifications + } + + pub fn notifications_mut(&mut self) -> &mut VecDeque { + &mut self.notifications + } } pub struct ConnectionRef<'a> { diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 78b318b16..80380a87e 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -77,6 +77,8 @@ pub use crate::copy_out_reader::CopyOutReader; #[doc(no_inline)] pub use crate::error::Error; pub use crate::generic_client::GenericClient; +#[doc(inline)] +pub use crate::notifications::Notifications; #[doc(no_inline)] pub use crate::row::{Row, SimpleQueryRow}; pub use crate::row_iter::RowIter; @@ -94,6 +96,7 @@ mod copy_in_writer; mod copy_out_reader; mod generic_client; mod lazy_pin; +pub mod notifications; mod row_iter; mod transaction; mod transaction_builder; diff --git a/postgres/src/notifications.rs b/postgres/src/notifications.rs new file mode 100644 index 000000000..e8c681548 --- /dev/null +++ b/postgres/src/notifications.rs @@ -0,0 +1,161 @@ +//! Asynchronous notifications. + +use crate::connection::ConnectionRef; +use crate::{Error, Notification}; +use fallible_iterator::FallibleIterator; +use futures::{ready, FutureExt}; +use std::task::Poll; +use std::time::Duration; +use tokio::time::{self, Delay, Instant}; + +/// Notifications from a PostgreSQL backend. +pub struct Notifications<'a> { + connection: ConnectionRef<'a>, +} + +impl<'a> Notifications<'a> { + pub(crate) fn new(connection: ConnectionRef<'a>) -> Notifications<'a> { + Notifications { connection } + } + + /// Returns the number of already buffered pending notifications. + pub fn len(&self) -> usize { + self.connection.notifications().len() + } + + /// Determines if there are any already buffered pending notifications. + pub fn is_empty(&self) -> bool { + self.connection.notifications().is_empty() + } + + /// Returns a nonblocking iterator over notifications. + /// + /// If there are no already buffered pending notifications, this iterator will poll the connection but will not + /// block waiting on notifications over the network. 
A return value of `None` either indicates that there are no + /// pending notifications or that the server has disconnected. + /// + /// # Note + /// + /// This iterator may start returning `Some` after previously returning `None` if more notifications are received. + pub fn iter(&mut self) -> Iter<'_> { + Iter { + connection: self.connection.as_ref(), + } + } + + /// Returns a blocking iterator over notifications. + /// + /// If there are no already buffered pending notifications, this iterator will block indefinitely waiting on the + /// PostgreSQL backend server to send one. It will only return `None` if the server has disconnected. + pub fn blocking_iter(&mut self) -> BlockingIter<'_> { + BlockingIter { + connection: self.connection.as_ref(), + } + } + + /// Returns an iterator over notifications which blocks a limited amount of time. + /// + /// If there are no already buffered pending notifications, this iterator will block waiting on the PostgreSQL + /// backend server to send one up to the provided timeout. A return value of `None` either indicates that there are + /// no pending notifications or that the server has disconnected. + /// + /// # Note + /// + /// This iterator may start returning `Some` after previously returning `None` if more notifications are received. + pub fn timeout_iter(&mut self, timeout: Duration) -> TimeoutIter<'_> { + TimeoutIter { + delay: self.connection.enter(|| time::delay_for(timeout)), + timeout, + connection: self.connection.as_ref(), + } + } +} + +/// A nonblocking iterator over pending notifications. +pub struct Iter<'a> { + connection: ConnectionRef<'a>, +} + +impl<'a> FallibleIterator for Iter<'a> { + type Item = Notification; + type Error = Error; + + fn next(&mut self) -> Result, Self::Error> { + if let Some(notification) = self.connection.notifications_mut().pop_front() { + return Ok(Some(notification)); + } + + self.connection + .poll_block_on(|_, notifications, _| Poll::Ready(Ok(notifications.pop_front()))) + } + + fn size_hint(&self) -> (usize, Option) { + (self.connection.notifications().len(), None) + } +} + +/// A blocking iterator over pending notifications. +pub struct BlockingIter<'a> { + connection: ConnectionRef<'a>, +} + +impl<'a> FallibleIterator for BlockingIter<'a> { + type Item = Notification; + type Error = Error; + + fn next(&mut self) -> Result, Self::Error> { + if let Some(notification) = self.connection.notifications_mut().pop_front() { + return Ok(Some(notification)); + } + + self.connection + .poll_block_on(|_, notifications, done| match notifications.pop_front() { + Some(notification) => Poll::Ready(Ok(Some(notification))), + None if done => Poll::Ready(Ok(None)), + None => Poll::Pending, + }) + } + + fn size_hint(&self) -> (usize, Option) { + (self.connection.notifications().len(), None) + } +} + +/// A time-limited blocking iterator over pending notifications. 
+pub struct TimeoutIter<'a> { + connection: ConnectionRef<'a>, + delay: Delay, + timeout: Duration, +} + +impl<'a> FallibleIterator for TimeoutIter<'a> { + type Item = Notification; + type Error = Error; + + fn next(&mut self) -> Result, Self::Error> { + if let Some(notification) = self.connection.notifications_mut().pop_front() { + self.delay.reset(Instant::now() + self.timeout); + return Ok(Some(notification)); + } + + let delay = &mut self.delay; + let timeout = self.timeout; + self.connection.poll_block_on(|cx, notifications, done| { + match notifications.pop_front() { + Some(notification) => { + delay.reset(Instant::now() + timeout); + return Poll::Ready(Ok(Some(notification))); + } + None if done => return Poll::Ready(Ok(None)), + None => {} + } + + ready!(delay.poll_unpin(cx)); + Poll::Ready(Ok(None)) + }) + } + + fn size_hint(&self) -> (usize, Option) { + (self.connection.notifications().len(), None) + } +} diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 449aac012..9edde8e32 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -309,3 +309,93 @@ fn cancel_query() { cancel_thread.join().unwrap(); } + +#[test] +fn notifications_iter() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute( + "\ + LISTEN notifications_iter; + NOTIFY notifications_iter, 'hello'; + NOTIFY notifications_iter, 'world'; + ", + ) + .unwrap(); + + let notifications = client.notifications().iter().collect::>().unwrap(); + assert_eq!(notifications.len(), 2); + assert_eq!(notifications[0].payload(), "hello"); + assert_eq!(notifications[1].payload(), "world"); +} + +#[test] +fn notifications_blocking_iter() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute( + "\ + LISTEN notifications_blocking_iter; + NOTIFY notifications_blocking_iter, 'hello'; + ", + ) + .unwrap(); + + thread::spawn(|| { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + thread::sleep(Duration::from_secs(1)); + client + .batch_execute("NOTIFY notifications_blocking_iter, 'world'") + .unwrap(); + }); + + let notifications = client + .notifications() + .blocking_iter() + .take(2) + .collect::>() + .unwrap(); + assert_eq!(notifications.len(), 2); + assert_eq!(notifications[0].payload(), "hello"); + assert_eq!(notifications[1].payload(), "world"); +} + +#[test] +fn notifications_timeout_iter() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute( + "\ + LISTEN notifications_timeout_iter; + NOTIFY notifications_timeout_iter, 'hello'; + ", + ) + .unwrap(); + + thread::spawn(|| { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + thread::sleep(Duration::from_secs(1)); + client + .batch_execute("NOTIFY notifications_timeout_iter, 'world'") + .unwrap(); + + thread::sleep(Duration::from_secs(10)); + client + .batch_execute("NOTIFY notifications_timeout_iter, '!'") + .unwrap(); + }); + + let notifications = client + .notifications() + .timeout_iter(Duration::from_secs(2)) + .collect::>() + .unwrap(); + assert_eq!(notifications.len(), 2); + assert_eq!(notifications[0].payload(), "hello"); + assert_eq!(notifications[1].payload(), "world"); +} From 2e0ad717f98b9e96ea379a0a3d5b8883661a86ec Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 22 Mar 2020 15:25:08 -0700 Subject: [PATCH 399/819] Bump CI for rustfmt change --- 
.circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index acaec4a32..f3dae7101 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ version: 2 jobs: build: docker: - - image: rust:1.40.0 + - image: rust:1.41.0 environment: RUSTFLAGS: -D warnings - image: sfackler/rust-postgres-test:6 From dd0c39e0414e30e98271836b99ef289d04b7d569 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 1 Apr 2020 13:55:38 -0400 Subject: [PATCH 400/819] Don't accidentally return early from CopyOutReader The COPY OUT protocol allows sending CopyData packets that have no data. The (synchronous) CopyOutReader needs to be careful not to return an empty slice in this case, but instead request more data, since an empty slice is taken to mean EOF. --- postgres/src/copy_out_reader.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index a205d1a1a..92abebced 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -34,7 +34,7 @@ impl Read for CopyOutReader<'_> { impl BufRead for CopyOutReader<'_> { fn fill_buf(&mut self) -> io::Result<&[u8]> { - if !self.cur.has_remaining() { + while !self.cur.has_remaining() { let mut stream = self.stream.pinned(); match self .connection @@ -42,7 +42,7 @@ impl BufRead for CopyOutReader<'_> { { Ok(Some(cur)) => self.cur = cur, Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), - Ok(None) => {} + Ok(None) => break, }; } From 64d6e97effbea03d3c7baf9eded405ef1ffbde43 Mon Sep 17 00:00:00 2001 From: Nathan VanBenschoten Date: Fri, 1 May 2020 12:55:48 -0400 Subject: [PATCH 401/819] Re-add savepoint method to Transaction Revives #184. The rewrite for async/await and Tokio accidentally lost functionality that allowed users to assign specific names to savepoints when using nested transactions. This functionality had originally been added in #184 and had been updated in #374. This commit revives this functionality using a similar scheme to the one that existed before. This should allow CockroachDB users to update to the next patch release of version `0.17`. 
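For illustration, a minimal sketch of the revived API against the blocking client (the connection
parameters and the temporary `foo` table are assumptions for the example, not part of this patch):

    use postgres::{Client, Error, NoTls};

    fn main() -> Result<(), Error> {
        let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
        client.batch_execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)")?;

        let mut transaction = client.transaction()?;
        transaction.execute("INSERT INTO foo (id) VALUES (1)", &[])?;

        // Work done under the named savepoint can be rolled back on its own,
        // without discarding the outer transaction.
        let mut savepoint = transaction.savepoint("my_savepoint")?;
        savepoint.execute("INSERT INTO foo (id) VALUES (2)", &[])?;
        savepoint.rollback()?;

        // Only the row inserted before the savepoint is committed.
        transaction.commit()?;
        Ok(())
    }

Unnamed nested transactions created via `Transaction::transaction` still generate `sp_{depth}`
savepoint names internally, so existing callers are unaffected.
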
--- postgres/src/test.rs | 51 +++++++++++++++++++++++++++++++ postgres/src/transaction.rs | 13 +++++++- tokio-postgres/src/transaction.rs | 49 ++++++++++++++++++++--------- 3 files changed, 97 insertions(+), 16 deletions(-) diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 9edde8e32..6750118ba 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -151,6 +151,57 @@ fn nested_transactions() { assert_eq!(rows[2].get::<_, i32>(0), 4); } +#[test] +fn savepoints() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)") + .unwrap(); + + let mut transaction = client.transaction().unwrap(); + + transaction + .execute("INSERT INTO foo (id) VALUES (1)", &[]) + .unwrap(); + + let mut savepoint1 = transaction.savepoint("savepoint1").unwrap(); + + savepoint1 + .execute("INSERT INTO foo (id) VALUES (2)", &[]) + .unwrap(); + + savepoint1.rollback().unwrap(); + + let rows = transaction + .query("SELECT id FROM foo ORDER BY id", &[]) + .unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); + + let mut savepoint2 = transaction.savepoint("savepoint2").unwrap(); + + savepoint2 + .execute("INSERT INTO foo (id) VALUES(3)", &[]) + .unwrap(); + + let mut savepoint3 = savepoint2.savepoint("savepoint3").unwrap(); + + savepoint3 + .execute("INSERT INTO foo (id) VALUES(4)", &[]) + .unwrap(); + + savepoint3.commit().unwrap(); + savepoint2.commit().unwrap(); + transaction.commit().unwrap(); + + let rows = client.query("SELECT id FROM foo ORDER BY id", &[]).unwrap(); + assert_eq!(rows.len(), 3); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[1].get::<_, i32>(0), 3); + assert_eq!(rows[2].get::<_, i32>(0), 4); +} + #[test] fn copy_in() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 25bfff578..1a021cd0a 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -173,7 +173,7 @@ impl<'a> Transaction<'a> { CancelToken::new(self.transaction.cancel_token()) } - /// Like `Client::transaction`. + /// Like `Client::transaction`, but creates a nested transaction via a savepoint. pub fn transaction(&mut self) -> Result, Error> { let transaction = self.connection.block_on(self.transaction.transaction())?; Ok(Transaction { @@ -181,4 +181,15 @@ impl<'a> Transaction<'a> { transaction, }) } + /// Like `Client::transaction`, but creates a nested transaction via a savepoint with the specified name. + pub fn savepoint(&mut self, name: I) -> Result, Error> + where + I: Into, + { + let transaction = self.connection.block_on(self.transaction.savepoint(name))?; + Ok(Transaction { + connection: self.connection.as_ref(), + transaction, + }) + } } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index a1ee4f6cb..38fdf7cea 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -23,20 +23,26 @@ use tokio::io::{AsyncRead, AsyncWrite}; /// transaction. Transactions can be nested, with inner transactions implemented via safepoints. pub struct Transaction<'a> { client: &'a mut Client, - depth: u32, + savepoint: Option, done: bool, } +/// A representation of a PostgreSQL database savepoint. 
+struct Savepoint { + name: String, + depth: u32, +} + impl<'a> Drop for Transaction<'a> { fn drop(&mut self) { if self.done { return; } - let query = if self.depth == 0 { - "ROLLBACK".to_string() + let query = if let Some(sp) = self.savepoint.as_ref() { + format!("ROLLBACK TO {}", sp.name) } else { - format!("ROLLBACK TO sp{}", self.depth) + "ROLLBACK".to_string() }; let buf = self.client.inner().with_buf(|buf| { frontend::query(&query, buf).unwrap(); @@ -53,7 +59,7 @@ impl<'a> Transaction<'a> { pub(crate) fn new(client: &'a mut Client) -> Transaction<'a> { Transaction { client, - depth: 0, + savepoint: None, done: false, } } @@ -61,10 +67,10 @@ impl<'a> Transaction<'a> { /// Consumes the transaction, committing all changes made within it. pub async fn commit(mut self) -> Result<(), Error> { self.done = true; - let query = if self.depth == 0 { - "COMMIT".to_string() + let query = if let Some(sp) = self.savepoint.as_ref() { + format!("RELEASE {}", sp.name) } else { - format!("RELEASE sp{}", self.depth) + "COMMIT".to_string() }; self.client.batch_execute(&query).await } @@ -74,10 +80,10 @@ impl<'a> Transaction<'a> { /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. pub async fn rollback(mut self) -> Result<(), Error> { self.done = true; - let query = if self.depth == 0 { - "ROLLBACK".to_string() + let query = if let Some(sp) = self.savepoint.as_ref() { + format!("ROLLBACK TO {}", sp.name) } else { - format!("ROLLBACK TO sp{}", self.depth) + "ROLLBACK".to_string() }; self.client.batch_execute(&query).await } @@ -272,15 +278,28 @@ impl<'a> Transaction<'a> { self.client.cancel_query_raw(stream, tls).await } - /// Like `Client::transaction`. + /// Like `Client::transaction`, but creates a nested transaction via a savepoint. pub async fn transaction(&mut self) -> Result, Error> { - let depth = self.depth + 1; - let query = format!("SAVEPOINT sp{}", depth); + self._savepoint(None).await + } + + /// Like `Client::transaction`, but creates a nested transaction via a savepoint with the specified name. + pub async fn savepoint(&mut self, name: I) -> Result, Error> + where + I: Into, + { + self._savepoint(Some(name.into())).await + } + + async fn _savepoint(&mut self, name: Option) -> Result, Error> { + let depth = self.savepoint.as_ref().map_or(0, |sp| sp.depth) + 1; + let name = name.unwrap_or_else(|| format!("sp_{}", depth)); + let query = format!("SAVEPOINT {}", name); self.batch_execute(&query).await?; Ok(Transaction { client: self.client, - depth, + savepoint: Some(Savepoint { name, depth }), done: false, }) } From 66f5a8bbc58f92b5b048f75db7e036eec646f503 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 1 May 2020 16:10:41 -0700 Subject: [PATCH 402/819] Release tokio-postgres v0.5.4 --- tokio-postgres/CHANGELOG.md | 6 ++++++ tokio-postgres/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index e65f00f14..de25e5616 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.4- 2020-05-01 + +### Added + +* Added `Transaction::savepoint`, which can be used to create a savepoint with a custom name. 
+ ## v0.5.3 - 2020-03-05 ### Added diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index d1571c352..46a86d1e7 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.5.3" +version = "0.5.4" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From f9ba58967ba2bbbbb6ff9c84a07adae246a8e316 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 1 May 2020 16:15:31 -0700 Subject: [PATCH 403/819] Release postgres v0.17.3 --- postgres/CHANGELOG.md | 11 +++++++++++ postgres/Cargo.toml | 4 ++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 550f0c706..d951ed445 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,16 @@ # Change Log +## v0.17.3 - 2020-05-01 + +### Fixed + +* Errors sent by the server will now be returned from `Client` methods rather than just being logs. + +### Added + +* Added `Transaction::savepoint`, which can be used to create a savepoint with a custom name. +* Added `Client::notifications`, which returns an interface to the notifications sent by the server. + ## v0.17.2 - 2020-03-05 ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index d0cf11004..601095da0 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.17.2" +version = "0.17.3" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -33,7 +33,7 @@ with-time-0_2 = ["tokio-postgres/with-time-0_2"] bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.5.3", path = "../tokio-postgres" } +tokio-postgres = { version = "0.5.4", path = "../tokio-postgres" } tokio = { version = "0.2", features = ["rt-core", "time"] } log = "0.4" From a1efddd64a47aca595d5801a6ae40c51832eb0cf Mon Sep 17 00:00:00 2001 From: Naim A <227396+naim94a@users.noreply.github.com> Date: Tue, 12 May 2020 13:35:10 +0300 Subject: [PATCH 404/819] fix typo --- postgres/src/lib.rs | 2 +- tokio-postgres/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 80380a87e..92a75bd84 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -57,7 +57,7 @@ //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | //! | `with-geo-types-0_4` | Enable support for the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types) 0.4 | no | -//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [eui48](https://crates.io/crates/serde_json) 1.0 | no | +//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | #![doc(html_root_url = "https://docs.rs/postgres/0.17")] diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 2845fcf61..9e7210507 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -108,7 +108,7 @@ //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! 
| `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | //! | `with-geo-types-0_4` | Enable support for the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types) 0.4 | no | -//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [eui48](https://crates.io/crates/serde_json) 1.0 | no | +//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | #![doc(html_root_url = "https://docs.rs/tokio-postgres/0.5")] From 887be868164ae53d1938a6669b1ca30176d52678 Mon Sep 17 00:00:00 2001 From: Jakub Wieczorek Date: Mon, 25 May 2020 12:08:17 +0200 Subject: [PATCH 405/819] Add support for geo-types=0.5 Support for geo-types=0.4 (via the `with-geo-types_04` feature) has been preserved for convenience. --- postgres-types/Cargo.toml | 2 + postgres-types/src/geo_types_05.rs | 72 ++++++++++++++++++++++++++++++ postgres-types/src/lib.rs | 2 + postgres/Cargo.toml | 1 + postgres/src/lib.rs | 3 +- tokio-postgres/Cargo.toml | 2 + tokio-postgres/src/lib.rs | 3 +- 7 files changed, 83 insertions(+), 2 deletions(-) create mode 100644 postgres-types/src/geo_types_05.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index a12f1f513..57cc79c85 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -16,6 +16,7 @@ with-bit-vec-0_6 = ["bit-vec-06"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] with-geo-types-0_4 = ["geo-types-04"] +with-geo-types-0_5 = ["geo-types-05"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_8 = ["uuid-08"] with-time-0_2 = ["time-02"] @@ -30,6 +31,7 @@ bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } geo-types-04 = { version = "0.4", package = "geo-types", optional = true } +geo-types-05 = { version = "0.5", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } diff --git a/postgres-types/src/geo_types_05.rs b/postgres-types/src/geo_types_05.rs new file mode 100644 index 000000000..f169134e3 --- /dev/null +++ b/postgres-types/src/geo_types_05.rs @@ -0,0 +1,72 @@ +use bytes::BytesMut; +use fallible_iterator::FallibleIterator; +use geo_types_05::{Coordinate, LineString, Point, Rect}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for Point { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let point = types::point_from_sql(raw)?; + Ok(Point::new(point.x(), point.y())) + } + + accepts!(POINT); +} + +impl ToSql for Point { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::point_to_sql(self.x(), self.y(), out); + Ok(IsNull::No) + } + + accepts!(POINT); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Rect { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let rect = types::box_from_sql(raw)?; + Ok(Rect::new( + (rect.lower_left().x(), rect.lower_left().y()), + (rect.upper_right().x(), rect.upper_right().y()), + 
)) + } + + accepts!(BOX); +} + +impl ToSql for Rect { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::box_to_sql(self.min().x, self.min().y, self.max().x, self.max().y, out); + Ok(IsNull::No) + } + + accepts!(BOX); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for LineString { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let path = types::path_from_sql(raw)?; + let points = path + .points() + .map(|p| Ok(Coordinate { x: p.x(), y: p.y() })) + .collect()?; + Ok(LineString(points)) + } + + accepts!(PATH); +} + +impl ToSql for LineString { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + let closed = false; // always encode an open path from LineString + types::path_to_sql(closed, self.0.iter().map(|p| (p.x, p.y)), out)?; + Ok(IsNull::No) + } + + accepts!(PATH); + to_sql_checked!(); +} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 1479c1264..b1bb0300d 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -196,6 +196,8 @@ mod chrono_04; mod eui48_04; #[cfg(feature = "with-geo-types-0_4")] mod geo_types_04; +#[cfg(feature = "with-geo-types-0_5")] +mod geo_types_05; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-time-0_2")] diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 601095da0..4742396e3 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -25,6 +25,7 @@ with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] with-geo-types-0_4 = ["tokio-postgres/with-geo-types-0_4"] +with-geo-types-0_5 = ["tokio-postgres/with-geo-types-0_5"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] with-time-0_2 = ["tokio-postgres/with-time-0_2"] diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 92a75bd84..dc0fd4440 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -56,7 +56,8 @@ //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | -//! | `with-geo-types-0_4` | Enable support for the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types) 0.4 | no | +//! | `with-geo-types-0_4` | Enable support for the 0.4 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.4.0) 0.4 | no | +//! | `with-geo-types-0_5` | Enable support for the 0.5 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.5.0) 0.5 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. 
| [time](https://crates.io/crates/time) 0.2 | no | diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 46a86d1e7..aaed29ae3 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -31,6 +31,7 @@ with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] with-geo-types-0_4 = ["postgres-types/with-geo-types-0_4"] +with-geo-types-0_5 = ["postgres-types/with-geo-types-0_5"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] with-time-0_2 = ["postgres-types/with-time-0_2"] @@ -60,6 +61,7 @@ bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono" } eui48-04 = { version = "0.4", package = "eui48" } geo-types-04 = { version = "0.4", package = "geo-types" } +geo-types-05 = { version = "0.5", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } uuid-08 = { version = "0.8", package = "uuid" } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 9e7210507..3b96f4a0e 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -107,7 +107,8 @@ //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | -//! | `with-geo-types-0_4` | Enable support for the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types) 0.4 | no | +//! | `with-geo-types-0_4` | Enable support for the 0.4 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.4.0) 0.4 | no | +//! | `with-geo-types-0_5` | Enable support for the 0.5 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.5.0) 0.5 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. 
| [time](https://crates.io/crates/time) 0.2 | no | From e7661fd71ff17acff3052db22bcf8223faeaae2e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 25 May 2020 05:48:40 -0700 Subject: [PATCH 406/819] Fix geo-types tests --- .../types/{geo_010.rs => geo_types_04.rs} | 2 +- .../tests/test/types/geo_types_05.rs | 60 +++++++++++++++++++ tokio-postgres/tests/test/types/mod.rs | 6 +- 3 files changed, 65 insertions(+), 3 deletions(-) rename tokio-postgres/tests/test/types/{geo_010.rs => geo_types_04.rs} (95%) create mode 100644 tokio-postgres/tests/test/types/geo_types_05.rs diff --git a/tokio-postgres/tests/test/types/geo_010.rs b/tokio-postgres/tests/test/types/geo_types_04.rs similarity index 95% rename from tokio-postgres/tests/test/types/geo_010.rs rename to tokio-postgres/tests/test/types/geo_types_04.rs index 6e3d835b9..b26fb4091 100644 --- a/tokio-postgres/tests/test/types/geo_010.rs +++ b/tokio-postgres/tests/test/types/geo_types_04.rs @@ -1,4 +1,4 @@ -use geo_010::{Coordinate, LineString, Point, Rect}; +use geo_types_04::{Coordinate, LineString, Point, Rect}; use crate::types::test_type; diff --git a/tokio-postgres/tests/test/types/geo_types_05.rs b/tokio-postgres/tests/test/types/geo_types_05.rs new file mode 100644 index 000000000..b53951c1d --- /dev/null +++ b/tokio-postgres/tests/test/types/geo_types_05.rs @@ -0,0 +1,60 @@ +use geo_types_05::{Coordinate, LineString, Point, Rect}; + +use crate::types::test_type; + +#[tokio::test] +async fn test_point_params() { + test_type( + "POINT", + &[ + (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), + (Some(Point::new(-3.14, 1.618)), "POINT(-3.14, 1.618)"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_box_params() { + test_type( + "BOX", + &[ + ( + Some(Rect::new( + Coordinate { x: -3.14, y: 1.618 }, + Coordinate { + x: 160.0, + y: 69701.5615, + }, + )), + "BOX(POINT(160.0, 69701.5615), POINT(-3.14, 1.618))", + ), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_path_params() { + let points = vec![ + Coordinate { x: 0., y: 0. 
}, + Coordinate { x: -3.14, y: 1.618 }, + Coordinate { + x: 160.0, + y: 69701.5615, + }, + ]; + test_type( + "PATH", + &[ + ( + Some(LineString(points)), + "path '((0, 0), (-3.14, 1.618), (160.0, 69701.5615))'", + ), + (None, "NULL"), + ], + ) + .await; +} diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 5d292db52..9f96019fe 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -18,8 +18,10 @@ mod bit_vec_06; mod chrono_04; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; -#[cfg(feature = "with-geo-0_10")] -mod geo_010; +#[cfg(feature = "with-geo-types-0_4")] +mod geo_types_04; +#[cfg(feature = "with-geo-types-0_5")] +mod geo_types_05; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-time-0_2")] From 2b59b7e63c2013116dc0335a7e7f891cde6a235a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 25 May 2020 05:54:19 -0700 Subject: [PATCH 407/819] fix clippy --- postgres-types/src/lib.rs | 1 - tokio-postgres/tests/test/types/geo_types_04.rs | 10 +++++----- tokio-postgres/tests/test/types/geo_types_05.rs | 10 +++++----- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index b1bb0300d..dbde5eb04 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -109,7 +109,6 @@ #![warn(clippy::all, rust_2018_idioms, missing_docs)] use fallible_iterator::FallibleIterator; -use postgres_protocol; use postgres_protocol::types::{self, ArrayDimension}; use std::any::type_name; use std::borrow::Cow; diff --git a/tokio-postgres/tests/test/types/geo_types_04.rs b/tokio-postgres/tests/test/types/geo_types_04.rs index b26fb4091..a4f0ac6f6 100644 --- a/tokio-postgres/tests/test/types/geo_types_04.rs +++ b/tokio-postgres/tests/test/types/geo_types_04.rs @@ -8,7 +8,7 @@ async fn test_point_params() { "POINT", &[ (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), - (Some(Point::new(-3.14, 1.618)), "POINT(-3.14, 1.618)"), + (Some(Point::new(-3.2, 1.618)), "POINT(-3.2, 1.618)"), (None, "NULL"), ], ) @@ -22,13 +22,13 @@ async fn test_box_params() { &[ ( Some(Rect { - min: Coordinate { x: -3.14, y: 1.618 }, + min: Coordinate { x: -3.2, y: 1.618 }, max: Coordinate { x: 160.0, y: 69701.5615, }, }), - "BOX(POINT(160.0, 69701.5615), POINT(-3.14, 1.618))", + "BOX(POINT(160.0, 69701.5615), POINT(-3.2, 1.618))", ), (None, "NULL"), ], @@ -40,7 +40,7 @@ async fn test_box_params() { async fn test_path_params() { let points = vec![ Coordinate { x: 0., y: 0. 
}, - Coordinate { x: -3.14, y: 1.618 }, + Coordinate { x: -3.2, y: 1.618 }, Coordinate { x: 160.0, y: 69701.5615, @@ -51,7 +51,7 @@ async fn test_path_params() { &[ ( Some(LineString(points)), - "path '((0, 0), (-3.14, 1.618), (160.0, 69701.5615))'", + "path '((0, 0), (-3.2, 1.618), (160.0, 69701.5615))'", ), (None, "NULL"), ], diff --git a/tokio-postgres/tests/test/types/geo_types_05.rs b/tokio-postgres/tests/test/types/geo_types_05.rs index b53951c1d..334dc0b13 100644 --- a/tokio-postgres/tests/test/types/geo_types_05.rs +++ b/tokio-postgres/tests/test/types/geo_types_05.rs @@ -8,7 +8,7 @@ async fn test_point_params() { "POINT", &[ (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), - (Some(Point::new(-3.14, 1.618)), "POINT(-3.14, 1.618)"), + (Some(Point::new(-3.2, 1.618)), "POINT(-3.2, 1.618)"), (None, "NULL"), ], ) @@ -22,13 +22,13 @@ async fn test_box_params() { &[ ( Some(Rect::new( - Coordinate { x: -3.14, y: 1.618 }, + Coordinate { x: -3.2, y: 1.618 }, Coordinate { x: 160.0, y: 69701.5615, }, )), - "BOX(POINT(160.0, 69701.5615), POINT(-3.14, 1.618))", + "BOX(POINT(160.0, 69701.5615), POINT(-3.2, 1.618))", ), (None, "NULL"), ], @@ -40,7 +40,7 @@ async fn test_box_params() { async fn test_path_params() { let points = vec![ Coordinate { x: 0., y: 0. }, - Coordinate { x: -3.14, y: 1.618 }, + Coordinate { x: -3.2, y: 1.618 }, Coordinate { x: 160.0, y: 69701.5615, @@ -51,7 +51,7 @@ async fn test_path_params() { &[ ( Some(LineString(points)), - "path '((0, 0), (-3.14, 1.618), (160.0, 69701.5615))'", + "path '((0, 0), (-3.2, 1.618), (160.0, 69701.5615))'", ), (None, "NULL"), ], From 58a7856646723e1bcfc06921d878a7fadc5c6281 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 10 Jun 2020 16:45:58 -0700 Subject: [PATCH 408/819] Update hmac and sha2 --- postgres-protocol/Cargo.toml | 4 +-- postgres-protocol/src/authentication/sasl.rs | 35 ++++++++++---------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index ebad1aefc..d7be8eb80 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -13,9 +13,9 @@ base64 = "0.12" byteorder = "1.0" bytes = "0.5" fallible-iterator = "0.2" -hmac = "0.7" +hmac = "0.8" md5 = "0.7" memchr = "2.0" rand = "0.7" -sha2 = "0.8" +sha2 = "0.9" stringprep = "0.1" diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index af458bbaf..994f0c4b9 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -1,8 +1,9 @@ //! SASL-based authentication support. 
-use hmac::{Hmac, Mac}; +use hmac::{Hmac, Mac, NewMac}; use rand::{self, Rng}; use sha2::{Digest, Sha256}; +use sha2::digest::FixedOutput; use std::fmt::Write; use std::io; use std::iter; @@ -33,16 +34,16 @@ fn normalize(pass: &[u8]) -> Vec { fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { let mut hmac = Hmac::::new_varkey(str).expect("HMAC is able to accept all key sizes"); - hmac.input(salt); - hmac.input(&[0, 0, 0, 1]); - let mut prev = hmac.result().code(); + hmac.update(salt); + hmac.update(&[0, 0, 0, 1]); + let mut prev = hmac.finalize().into_bytes(); let mut hi = prev; for _ in 1..i { let mut hmac = Hmac::::new_varkey(str).expect("already checked above"); - hmac.input(prev.as_slice()); - prev = hmac.result().code(); + hmac.update(&prev); + prev = hmac.finalize().into_bytes(); for (hi, prev) in hi.iter_mut().zip(prev) { *hi ^= prev; @@ -196,12 +197,12 @@ impl ScramSha256 { let mut hmac = Hmac::::new_varkey(&salted_password) .expect("HMAC is able to accept all key sizes"); - hmac.input(b"Client Key"); - let client_key = hmac.result().code(); + hmac.update(b"Client Key"); + let client_key = hmac.finalize().into_bytes(); let mut hash = Sha256::default(); - hash.input(client_key.as_slice()); - let stored_key = hash.result(); + hash.update(client_key.as_slice()); + let stored_key = hash.finalize_fixed(); let mut cbind_input = vec![]; cbind_input.extend(channel_binding.gs2_header().as_bytes()); @@ -215,11 +216,11 @@ impl ScramSha256 { let mut hmac = Hmac::::new_varkey(&stored_key).expect("HMAC is able to accept all key sizes"); - hmac.input(auth_message.as_bytes()); - let client_signature = hmac.result(); + hmac.update(auth_message.as_bytes()); + let client_signature = hmac.finalize().into_bytes(); let mut client_proof = client_key; - for (proof, signature) in client_proof.iter_mut().zip(client_signature.code()) { + for (proof, signature) in client_proof.iter_mut().zip(client_signature) { *proof ^= signature; } @@ -267,12 +268,12 @@ impl ScramSha256 { let mut hmac = Hmac::::new_varkey(&salted_password) .expect("HMAC is able to accept all key sizes"); - hmac.input(b"Server Key"); - let server_key = hmac.result(); + hmac.update(b"Server Key"); + let server_key = hmac.finalize().into_bytes(); - let mut hmac = Hmac::::new_varkey(&server_key.code()) + let mut hmac = Hmac::::new_varkey(&server_key) .expect("HMAC is able to accept all key sizes"); - hmac.input(auth_message.as_bytes()); + hmac.update(auth_message.as_bytes()); hmac.verify(&verifier) .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "SCRAM verification error")) } From a12efc41d7656f39ad23732f735264abd654bbe1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 10 Jun 2020 16:46:55 -0700 Subject: [PATCH 409/819] rustfmt --- postgres-protocol/src/authentication/sasl.rs | 6 +++--- postgres-types/src/lib.rs | 17 +++++++++-------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index 994f0c4b9..c99a27a2e 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -2,8 +2,8 @@ use hmac::{Hmac, Mac, NewMac}; use rand::{self, Rng}; -use sha2::{Digest, Sha256}; use sha2::digest::FixedOutput; +use sha2::{Digest, Sha256}; use std::fmt::Write; use std::io; use std::iter; @@ -271,8 +271,8 @@ impl ScramSha256 { hmac.update(b"Server Key"); let server_key = hmac.finalize().into_bytes(); - let mut hmac = Hmac::::new_varkey(&server_key) - .expect("HMAC is able to accept all key 
sizes"); + let mut hmac = + Hmac::::new_varkey(&server_key).expect("HMAC is able to accept all key sizes"); hmac.update(auth_message.as_bytes()); hmac.verify(&verifier) .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "SCRAM verification error")) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index dbde5eb04..e78cedf4a 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -158,16 +158,17 @@ macro_rules! accepts { #[macro_export] macro_rules! to_sql_checked { () => { - fn to_sql_checked(&self, - ty: &$crate::Type, - out: &mut $crate::private::BytesMut) - -> ::std::result::Result<$crate::IsNull, - Box> { + fn to_sql_checked( + &self, + ty: &$crate::Type, + out: &mut $crate::private::BytesMut, + ) -> ::std::result::Result< + $crate::IsNull, + Box, + > { $crate::__to_sql_checked(self, ty, out) } - } + }; } // WARNING: this function is not considered part of this crate's public API. From 3e67dbb773d7af47e0d19d9dc3688fc5db731c0c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 10 Jun 2020 16:54:07 -0700 Subject: [PATCH 410/819] Fix clippy --- codegen/src/type_gen.rs | 45 +++++++++++----------------- postgres/src/copy_out_reader.rs | 2 +- tokio-postgres/src/error/mod.rs | 20 ++++++------- tokio-postgres/src/generic_client.rs | 1 + 4 files changed, 30 insertions(+), 38 deletions(-) diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 99a38ce87..010b30c53 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -319,46 +319,42 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { .unwrap(); for (oid, type_) in types { - write!( + writeln!( w, - " {} => Some(Inner::{}), -", + " {} => Some(Inner::{}),", oid, type_.variant ) .unwrap(); } - write!( + writeln!( w, " _ => None, }} }} pub fn oid(&self) -> Oid {{ - match *self {{ -", + match *self {{", ) .unwrap(); for (oid, type_) in types { - write!( + writeln!( w, - " Inner::{} => {}, -", + " Inner::{} => {},", type_.variant, oid ) .unwrap(); } - write!( + writeln!( w, " Inner::Other(ref u) => u.oid, }} }} pub fn kind(&self) -> &Kind {{ - match *self {{ -", + match *self {{", ) .unwrap(); @@ -370,46 +366,42 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { _ => "Simple".to_owned(), }; - write!( + writeln!( w, " Inner::{} => {{ &Kind::{} - }} -", + }}", type_.variant, kind ) .unwrap(); } - write!( + writeln!( w, r#" Inner::Other(ref u) => &u.kind, }} }} pub fn name(&self) -> &str {{ - match *self {{ -"#, + match *self {{"#, ) .unwrap(); for type_ in types.values() { - write!( + writeln!( w, - r#" Inner::{} => "{}", -"#, + r#" Inner::{} => "{}","#, type_.variant, type_.name ) .unwrap(); } - write!( + writeln!( w, " Inner::Other(ref u) => &u.name, }} }} -}} -" +}}" ) .unwrap(); } @@ -417,12 +409,11 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { fn make_consts(w: &mut BufWriter, types: &BTreeMap) { write!(w, "impl Type {{").unwrap(); for type_ in types.values() { - write!( + writeln!( w, " /// {docs} - pub const {ident}: Type = Type(Inner::{variant}); -", + pub const {ident}: Type = Type(Inner::{variant});", docs = type_.doc, ident = type_.ident, variant = type_.variant diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index 92abebced..fd9c27fb0 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -38,7 +38,7 @@ impl BufRead for CopyOutReader<'_> { let mut stream = self.stream.pinned(); match self .connection - .block_on({ async { stream.next().await.transpose() } }) + .block_on(async { 
stream.next().await.transpose() }) { Ok(Some(cur)) => self.cur = cur, Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 788e70cf4..0bcf0c6f9 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -224,7 +224,7 @@ impl DbError { /// /// Might run to multiple lines. pub fn detail(&self) -> Option<&str> { - self.detail.as_ref().map(|s| &**s) + self.detail.as_deref() } /// An optional suggestion what to do about the problem. @@ -233,7 +233,7 @@ impl DbError { /// (potentially inappropriate) rather than hard facts. Might run to /// multiple lines. pub fn hint(&self) -> Option<&str> { - self.hint.as_ref().map(|s| &**s) + self.hint.as_deref() } /// An optional error cursor position into either the original query string @@ -248,20 +248,20 @@ impl DbError { /// language functions and internally-generated queries. The trace is one /// entry per line, most recent first. pub fn where_(&self) -> Option<&str> { - self.where_.as_ref().map(|s| &**s) + self.where_.as_deref() } /// If the error was associated with a specific database object, the name /// of the schema containing that object, if any. (PostgreSQL 9.3+) pub fn schema(&self) -> Option<&str> { - self.schema.as_ref().map(|s| &**s) + self.schema.as_deref() } /// If the error was associated with a specific table, the name of the /// table. (Refer to the schema name field for the name of the table's /// schema.) (PostgreSQL 9.3+) pub fn table(&self) -> Option<&str> { - self.table.as_ref().map(|s| &**s) + self.table.as_deref() } /// If the error was associated with a specific table column, the name of @@ -270,14 +270,14 @@ impl DbError { /// (Refer to the schema and table name fields to identify the table.) /// (PostgreSQL 9.3+) pub fn column(&self) -> Option<&str> { - self.column.as_ref().map(|s| &**s) + self.column.as_deref() } /// If the error was associated with a specific data type, the name of the /// data type. (Refer to the schema name field for the name of the data /// type's schema.) (PostgreSQL 9.3+) pub fn datatype(&self) -> Option<&str> { - self.datatype.as_ref().map(|s| &**s) + self.datatype.as_deref() } /// If the error was associated with a specific constraint, the name of the @@ -287,12 +287,12 @@ impl DbError { /// (For this purpose, indexes are treated as constraints, even if they /// weren't created with constraint syntax.) (PostgreSQL 9.3+) pub fn constraint(&self) -> Option<&str> { - self.constraint.as_ref().map(|s| &**s) + self.constraint.as_deref() } /// The file name of the source-code location where the error was reported. pub fn file(&self) -> Option<&str> { - self.file.as_ref().map(|s| &**s) + self.file.as_deref() } /// The line number of the source-code location where the error was @@ -303,7 +303,7 @@ impl DbError { /// The name of the source-code routine reporting the error. 
pub fn routine(&self) -> Option<&str> { - self.routine.as_ref().map(|s| &**s) + self.routine.as_deref() } } diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 30351bd0a..ad318e864 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -146,6 +146,7 @@ impl GenericClient for Client { impl private::Sealed for Transaction<'_> {} #[async_trait] +#[allow(clippy::needless_lifetimes)] impl GenericClient for Transaction<'_> { async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where From c845a3683e9dda42c0aa9a13dd32ee1d9c74b752 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 10 Jun 2020 16:56:22 -0700 Subject: [PATCH 411/819] rustfmt --- codegen/src/type_gen.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 010b30c53..485442a3f 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -319,12 +319,7 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { .unwrap(); for (oid, type_) in types { - writeln!( - w, - " {} => Some(Inner::{}),", - oid, type_.variant - ) - .unwrap(); + writeln!(w, " {} => Some(Inner::{}),", oid, type_.variant).unwrap(); } writeln!( @@ -339,12 +334,7 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { .unwrap(); for (oid, type_) in types { - writeln!( - w, - " Inner::{} => {},", - type_.variant, oid - ) - .unwrap(); + writeln!(w, " Inner::{} => {},", type_.variant, oid).unwrap(); } writeln!( From 5d9acb1eb15dce2a17fc9d6f68adf6519355ebc5 Mon Sep 17 00:00:00 2001 From: Jakub Wieczorek Date: Wed, 24 Jun 2020 12:28:21 +0200 Subject: [PATCH 412/819] Add support for the 0.6 version of `geo_types`. This change drops the support for the 0.5 version by renaming the feature, as no version of `postgres` with it included has ever been released. 
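As a usage sketch (the connection string, table, and coordinates below are assumptions for the
example), a downstream crate enables the renamed feature and maps `geo_types::Point` onto the
Postgres `POINT` type:

    // Cargo.toml (assumed):
    // postgres = { version = "0.17", features = ["with-geo-types-0_6"] }
    // geo-types = "0.6"

    use geo_types::Point;
    use postgres::{Client, Error, NoTls};

    fn main() -> Result<(), Error> {
        let mut client = Client::connect("host=localhost user=postgres", NoTls)?;
        client.batch_execute("CREATE TEMPORARY TABLE geoms (p POINT)")?;

        // With the feature enabled, ToSql/FromSql are implemented for
        // Point<f64> (POINT), Rect<f64> (BOX), and LineString<f64> (PATH).
        let p = Point::new(-3.2, 1.618);
        client.execute("INSERT INTO geoms (p) VALUES ($1)", &[&p])?;

        let row = client.query_one("SELECT p FROM geoms", &[])?;
        let roundtrip: Point<f64> = row.get(0);
        assert_eq!(roundtrip, p);
        Ok(())
    }
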
--- postgres-types/Cargo.toml | 4 ++-- postgres-types/src/{geo_types_05.rs => geo_types_06.rs} | 2 +- postgres-types/src/lib.rs | 4 ++-- postgres/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 4 ++-- .../tests/test/types/{geo_types_05.rs => geo_types_06.rs} | 2 +- tokio-postgres/tests/test/types/mod.rs | 4 ++-- 7 files changed, 11 insertions(+), 11 deletions(-) rename postgres-types/src/{geo_types_05.rs => geo_types_06.rs} (97%) rename tokio-postgres/tests/test/types/{geo_types_05.rs => geo_types_06.rs} (95%) diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 57cc79c85..afda3093d 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -16,7 +16,7 @@ with-bit-vec-0_6 = ["bit-vec-06"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] with-geo-types-0_4 = ["geo-types-04"] -with-geo-types-0_5 = ["geo-types-05"] +with-geo-types-0_6 = ["geo-types-06"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_8 = ["uuid-08"] with-time-0_2 = ["time-02"] @@ -31,7 +31,7 @@ bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4", package = "chrono", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } geo-types-04 = { version = "0.4", package = "geo-types", optional = true } -geo-types-05 = { version = "0.5", package = "geo-types", optional = true } +geo-types-06 = { version = "0.6", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } diff --git a/postgres-types/src/geo_types_05.rs b/postgres-types/src/geo_types_06.rs similarity index 97% rename from postgres-types/src/geo_types_05.rs rename to postgres-types/src/geo_types_06.rs index f169134e3..0f0b14fd9 100644 --- a/postgres-types/src/geo_types_05.rs +++ b/postgres-types/src/geo_types_06.rs @@ -1,6 +1,6 @@ use bytes::BytesMut; use fallible_iterator::FallibleIterator; -use geo_types_05::{Coordinate, LineString, Point, Rect}; +use geo_types_06::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index e78cedf4a..258b68edc 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -196,8 +196,8 @@ mod chrono_04; mod eui48_04; #[cfg(feature = "with-geo-types-0_4")] mod geo_types_04; -#[cfg(feature = "with-geo-types-0_5")] -mod geo_types_05; +#[cfg(feature = "with-geo-types-0_6")] +mod geo_types_06; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-time-0_2")] diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 4742396e3..cd199e27f 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -25,7 +25,7 @@ with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] with-geo-types-0_4 = ["tokio-postgres/with-geo-types-0_4"] -with-geo-types-0_5 = ["tokio-postgres/with-geo-types-0_5"] +with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] with-time-0_2 = ["tokio-postgres/with-time-0_2"] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index aaed29ae3..63c2dc6ce 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -31,7 +31,7 @@ 
with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] with-geo-types-0_4 = ["postgres-types/with-geo-types-0_4"] -with-geo-types-0_5 = ["postgres-types/with-geo-types-0_5"] +with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] with-time-0_2 = ["postgres-types/with-time-0_2"] @@ -61,7 +61,7 @@ bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono" } eui48-04 = { version = "0.4", package = "eui48" } geo-types-04 = { version = "0.4", package = "geo-types" } -geo-types-05 = { version = "0.5", package = "geo-types" } +geo-types-06 = { version = "0.6", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } uuid-08 = { version = "0.8", package = "uuid" } diff --git a/tokio-postgres/tests/test/types/geo_types_05.rs b/tokio-postgres/tests/test/types/geo_types_06.rs similarity index 95% rename from tokio-postgres/tests/test/types/geo_types_05.rs rename to tokio-postgres/tests/test/types/geo_types_06.rs index 334dc0b13..7195abc06 100644 --- a/tokio-postgres/tests/test/types/geo_types_05.rs +++ b/tokio-postgres/tests/test/types/geo_types_06.rs @@ -1,4 +1,4 @@ -use geo_types_05::{Coordinate, LineString, Point, Rect}; +use geo_types_06::{Coordinate, LineString, Point, Rect}; use crate::types::test_type; diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 9f96019fe..168ca3a4d 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -20,8 +20,8 @@ mod chrono_04; mod eui48_04; #[cfg(feature = "with-geo-types-0_4")] mod geo_types_04; -#[cfg(feature = "with-geo-types-0_5")] -mod geo_types_05; +#[cfg(feature = "with-geo-types-0_6")] +mod geo_types_06; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-time-0_2")] From f5c1902d7d7483744c782d1c3a82fef575ee727a Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Wed, 24 Jun 2020 13:18:13 +0000 Subject: [PATCH 413/819] Update parking_lot requirement from 0.10 to 0.11 Updates the requirements on [parking_lot](https://github.com/Amanieu/parking_lot) to permit the latest version. 
- [Release notes](https://github.com/Amanieu/parking_lot/releases) - [Changelog](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md) - [Commits](https://github.com/Amanieu/parking_lot/compare/0.10.0...0.11.0) Signed-off-by: dependabot-preview[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index aaed29ae3..b9869b486 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -43,7 +43,7 @@ byteorder = "1.0" fallible-iterator = "0.2" futures = "0.3" log = "0.4" -parking_lot = "0.10" +parking_lot = "0.11" percent-encoding = "2.0" pin-project-lite = "0.1" phf = "0.8" From fe904154df5c68ee7df9c0e9d8be279e935aff81 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 3 Jul 2020 15:16:54 -0400 Subject: [PATCH 414/819] Release postgres-types v0.1.2 --- postgres-types/CHANGELOG.md | 6 ++++++ postgres-types/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index f12c5c7f6..ddbdc6844 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.1.2 - 2020-07-03 + +### Added + +* Added support for `geo-types` 0.6. + ## v0.1.1 - 2020-03-05 ### Added diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index afda3093d..5295f7225 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.1.1" +version = "0.1.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 170c5a5e13230a2c7696362be12476079bc5d282 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 3 Jul 2020 15:19:15 -0400 Subject: [PATCH 415/819] Release tokio-postgres v0.5.5 --- tokio-postgres/CHANGELOG.md | 8 +++++++- tokio-postgres/Cargo.toml | 4 ++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index de25e5616..a02c461a5 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,6 +1,12 @@ # Change Log -## v0.5.4- 2020-05-01 +## v0.5.5 - 2020-07-03 + +### Added + +* Added support for `geo-types` 0.6. 
+ +## v0.5.4 - 2020-05-01 ### Added diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 6ab4f67be..dfc9bb0a9 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.5.4" +version = "0.5.5" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -48,7 +48,7 @@ percent-encoding = "2.0" pin-project-lite = "0.1" phf = "0.8" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } -postgres-types = { version = "0.1.1", path = "../postgres-types" } +postgres-types = { version = "0.1.2", path = "../postgres-types" } tokio = { version = "0.2", features = ["io-util"] } tokio-util = { version = "0.3", features = ["codec"] } From 27b36f53e293c4bceba395870056af0dc27b6799 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 3 Jul 2020 15:23:33 -0400 Subject: [PATCH 416/819] Release postgres v0.17.3 --- postgres/CHANGELOG.md | 8 +++++++- postgres/Cargo.toml | 4 ++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index d951ed445..a3ef16cb5 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,10 +1,16 @@ # Change Log +## v0.17.4 - 2020-07-03 + +### Added + +* Added support for `geo-types` 0.6. + ## v0.17.3 - 2020-05-01 ### Fixed -* Errors sent by the server will now be returned from `Client` methods rather than just being logs. +* Errors sent by the server will now be returned from `Client` methods rather than just being logged. ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index cd199e27f..2b8a7fbd9 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.17.3" +version = "0.17.4" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -34,7 +34,7 @@ with-time-0_2 = ["tokio-postgres/with-time-0_2"] bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.5.4", path = "../tokio-postgres" } +tokio-postgres = { version = "0.5.5", path = "../tokio-postgres" } tokio = { version = "0.2", features = ["rt-core", "time"] } log = "0.4" From 90f763b968143c02dfe035fc4f1e8f0835b11349 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 6 Jul 2020 16:30:43 -0400 Subject: [PATCH 417/819] Release postgres-protocol v0.5.2 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index b099c1ffa..3cff60c51 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.2 - 2020-07-06 + +### Changed + +* Upgraded `hmac` and `sha2`. 
+ ## v0.5.1 - 2020-03-17 ### Changed diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index d7be8eb80..6c26ac880 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.5.1" +version = "0.5.2" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" From c9a21f839813c7f963bbfd48e43e9b127629c184 Mon Sep 17 00:00:00 2001 From: shelvacu Date: Mon, 13 Jul 2020 15:15:34 -0700 Subject: [PATCH 418/819] Add Debug and Clone to `AsyncMessage` --- tokio-postgres/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 3b96f4a0e..c69fff793 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -220,6 +220,7 @@ impl Notification { /// An asynchronous message from the server. #[allow(clippy::large_enum_variant)] +#[derive(Debug, Clone)] #[non_exhaustive] pub enum AsyncMessage { /// A notice. From a4a68d543ddd064da706aa5d8b3f0e856330a80b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 19 Jul 2020 13:24:46 -0600 Subject: [PATCH 419/819] Ensure transactions roll back immediately on drop Closes #635 --- postgres/src/test.rs | 25 ++++++++++ postgres/src/transaction.rs | 91 +++++++++++++++++++++++-------------- 2 files changed, 83 insertions(+), 33 deletions(-) diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 6750118ba..35a6a0c79 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -100,6 +100,31 @@ fn transaction_drop() { assert_eq!(rows.len(), 0); } +#[test] +fn transaction_drop_immediate_rollback() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + let mut client2 = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .simple_query("CREATE TABLE IF NOT EXISTS foo (id SERIAL PRIMARY KEY)") + .unwrap(); + + client + .execute("INSERT INTO foo VALUES (1) ON CONFLICT DO NOTHING", &[]) + .unwrap(); + + let mut transaction = client.transaction().unwrap(); + + transaction + .execute("SELECT * FROM foo FOR UPDATE", &[]) + .unwrap(); + + drop(transaction); + + let rows = client2.query("SELECT * FROM foo FOR UPDATE", &[]).unwrap(); + assert_eq!(rows.len(), 1); +} + #[test] fn nested_transactions() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 1a021cd0a..3213b7c1f 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -9,7 +9,15 @@ use tokio_postgres::{Error, Row, SimpleQueryMessage}; /// in the transaction. Transactions can be nested, with inner transactions implemented via savepoints. pub struct Transaction<'a> { connection: ConnectionRef<'a>, - transaction: tokio_postgres::Transaction<'a>, + transaction: Option>, +} + +impl<'a> Drop for Transaction<'a> { + fn drop(&mut self) { + if let Some(transaction) = self.transaction.take() { + let _ = self.connection.block_on(transaction.rollback()); + } + } } impl<'a> Transaction<'a> { @@ -19,31 +27,38 @@ impl<'a> Transaction<'a> { ) -> Transaction<'a> { Transaction { connection, - transaction, + transaction: Some(transaction), } } /// Consumes the transaction, committing all changes made within it. 
pub fn commit(mut self) -> Result<(), Error> { - self.connection.block_on(self.transaction.commit()) + self.connection + .block_on(self.transaction.take().unwrap().commit()) } /// Rolls the transaction back, discarding all changes made within it. /// /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. pub fn rollback(mut self) -> Result<(), Error> { - self.connection.block_on(self.transaction.rollback()) + self.connection + .block_on(self.transaction.take().unwrap().rollback()) } /// Like `Client::prepare`. pub fn prepare(&mut self, query: &str) -> Result { - self.connection.block_on(self.transaction.prepare(query)) + self.connection + .block_on(self.transaction.as_ref().unwrap().prepare(query)) } /// Like `Client::prepare_typed`. pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - self.connection - .block_on(self.transaction.prepare_typed(query, types)) + self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .prepare_typed(query, types), + ) } /// Like `Client::execute`. @@ -52,7 +67,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.execute(query, params)) + .block_on(self.transaction.as_ref().unwrap().execute(query, params)) } /// Like `Client::query`. @@ -61,7 +76,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.query(query, params)) + .block_on(self.transaction.as_ref().unwrap().query(query, params)) } /// Like `Client::query_one`. @@ -70,7 +85,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.query_one(query, params)) + .block_on(self.transaction.as_ref().unwrap().query_one(query, params)) } /// Like `Client::query_opt`. @@ -83,7 +98,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.query_opt(query, params)) + .block_on(self.transaction.as_ref().unwrap().query_opt(query, params)) } /// Like `Client::query_raw`. @@ -95,7 +110,7 @@ impl<'a> Transaction<'a> { { let stream = self .connection - .block_on(self.transaction.query_raw(query, params))?; + .block_on(self.transaction.as_ref().unwrap().query_raw(query, params))?; Ok(RowIter::new(self.connection.as_ref(), stream)) } @@ -114,7 +129,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.bind(query, params)) + .block_on(self.transaction.as_ref().unwrap().bind(query, params)) } /// Continues execution of a portal, returning the next set of rows. @@ -122,8 +137,12 @@ impl<'a> Transaction<'a> { /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// `query_portal`. If the requested number is negative or 0, all remaining rows will be returned. pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { - self.connection - .block_on(self.transaction.query_portal(portal, max_rows)) + self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .query_portal(portal, max_rows), + ) } /// The maximally flexible version of `query_portal`. 
@@ -132,9 +151,12 @@ impl<'a> Transaction<'a> { portal: &Portal, max_rows: i32, ) -> Result, Error> { - let stream = self - .connection - .block_on(self.transaction.query_portal_raw(portal, max_rows))?; + let stream = self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .query_portal_raw(portal, max_rows), + )?; Ok(RowIter::new(self.connection.as_ref(), stream)) } @@ -143,7 +165,9 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let sink = self.connection.block_on(self.transaction.copy_in(query))?; + let sink = self + .connection + .block_on(self.transaction.as_ref().unwrap().copy_in(query))?; Ok(CopyInWriter::new(self.connection.as_ref(), sink)) } @@ -152,44 +176,45 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let stream = self.connection.block_on(self.transaction.copy_out(query))?; + let stream = self + .connection + .block_on(self.transaction.as_ref().unwrap().copy_out(query))?; Ok(CopyOutReader::new(self.connection.as_ref(), stream)) } /// Like `Client::simple_query`. pub fn simple_query(&mut self, query: &str) -> Result, Error> { self.connection - .block_on(self.transaction.simple_query(query)) + .block_on(self.transaction.as_ref().unwrap().simple_query(query)) } /// Like `Client::batch_execute`. pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { self.connection - .block_on(self.transaction.batch_execute(query)) + .block_on(self.transaction.as_ref().unwrap().batch_execute(query)) } /// Like `Client::cancel_token`. pub fn cancel_token(&self) -> CancelToken { - CancelToken::new(self.transaction.cancel_token()) + CancelToken::new(self.transaction.as_ref().unwrap().cancel_token()) } /// Like `Client::transaction`, but creates a nested transaction via a savepoint. pub fn transaction(&mut self) -> Result, Error> { - let transaction = self.connection.block_on(self.transaction.transaction())?; - Ok(Transaction { - connection: self.connection.as_ref(), - transaction, - }) + let transaction = self + .connection + .block_on(self.transaction.as_mut().unwrap().transaction())?; + Ok(Transaction::new(self.connection.as_ref(), transaction)) } + /// Like `Client::transaction`, but creates a nested transaction via a savepoint with the specified name. pub fn savepoint(&mut self, name: I) -> Result, Error> where I: Into, { - let transaction = self.connection.block_on(self.transaction.savepoint(name))?; - Ok(Transaction { - connection: self.connection.as_ref(), - transaction, - }) + let transaction = self + .connection + .block_on(self.transaction.as_mut().unwrap().savepoint(name))?; + Ok(Transaction::new(self.connection.as_ref(), transaction)) } } From f6620e6a24ac416101a811b89b69cd74e137af51 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 19 Jul 2020 13:27:27 -0600 Subject: [PATCH 420/819] Release postgres v0.17.5 --- postgres/CHANGELOG.md | 6 ++++++ postgres/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index a3ef16cb5..965bd2b09 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.17.5 - 2020-07-19 + +### Fixed + +* Fixed transactions to roll back immediately on drop. 
+ ## v0.17.4 - 2020-07-03 ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 2b8a7fbd9..3652ac35d 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.17.4" +version = "0.17.5" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From bc682b3103f9a03e4ee1af8881d826f4bffcbf44 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Jul 2020 21:42:02 -0400 Subject: [PATCH 421/819] Explicitly terminate the connection in sync API Closes #613 --- postgres/src/client.rs | 27 +++++++++++++++++++++++++++ postgres/src/test.rs | 6 ++++++ tokio-postgres/src/client.rs | 5 +++++ 3 files changed, 38 insertions(+) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index a0c61b33d..dcb9c72d4 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -3,6 +3,7 @@ use crate::{ CancelToken, Config, CopyInWriter, CopyOutReader, Notifications, RowIter, Statement, ToStatement, Transaction, TransactionBuilder, }; +use std::task::Poll; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage, Socket}; @@ -13,6 +14,12 @@ pub struct Client { client: tokio_postgres::Client, } +impl Drop for Client { + fn drop(&mut self) { + let _ = self.close_inner(); + } +} + impl Client { pub(crate) fn new(connection: Connection, client: tokio_postgres::Client) -> Client { Client { connection, client } @@ -524,4 +531,24 @@ impl Client { pub fn is_closed(&self) -> bool { self.client.is_closed() } + + /// Closes the client's connection to the server. + /// + /// This is equivalent to `Client`'s `Drop` implementation, except that it returns any error encountered to the + /// caller. + pub fn close(mut self) -> Result<(), Error> { + self.close_inner() + } + + fn close_inner(&mut self) -> Result<(), Error> { + self.client.__private_api_close(); + + self.connection.poll_block_on(|_, _, done| { + if done { + Poll::Ready(Ok(())) + } else { + Poll::Pending + } + }) + } } diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 35a6a0c79..557013746 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -475,3 +475,9 @@ fn notifications_timeout_iter() { assert_eq!(notifications[0].payload(), "hello"); assert_eq!(notifications[1].payload(), "world"); } + +#[test] +fn explicit_close() { + let client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + client.close().unwrap(); +} diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 2d9b79728..e19caae83 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -529,6 +529,11 @@ impl Client { pub fn is_closed(&self) -> bool { self.inner.sender.is_closed() } + + #[doc(hidden)] + pub fn __private_api_close(&mut self) { + self.inner.sender.close_channel() + } } impl fmt::Debug for Client { From a30f0b6c0586e467cd48d759b903eb8d4ec7c3e7 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Thu, 30 Jul 2020 22:51:18 -0400 Subject: [PATCH 422/819] Use checked arithmetic when decoding into chrono types This avoids an overflow panic if the timestamp is the special "infinity" or "-infinity" value and produces an error instead. Fix #640. 
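For illustration only, a rough sketch of the checked idiom this switches to (the 2000-01-01 base epoch and the error message mirror the change below; the helper name is made up): `checked_add_signed` returns `None` on overflow, which becomes an error rather than a panic.

```
use chrono::{Duration, NaiveDate, NaiveDateTime};

// Postgres binary timestamps are microseconds relative to 2000-01-01 00:00:00.
fn decode_timestamp(micros: i64) -> Result<NaiveDateTime, &'static str> {
    let base = NaiveDate::from_ymd(2000, 1, 1).and_hms(0, 0, 0);
    // None (overflow, e.g. the 'infinity' sentinel) maps to a decode error.
    base.checked_add_signed(Duration::microseconds(micros))
        .ok_or("value too large to decode")
}
```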
--- postgres-types/src/chrono_04.rs | 9 +++-- tokio-postgres/tests/test/types/chrono_04.rs | 35 +++++++++++++++++++- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/postgres-types/src/chrono_04.rs b/postgres-types/src/chrono_04.rs index 9bfbb786f..fcd25e6d1 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -12,7 +12,9 @@ fn base() -> NaiveDateTime { impl<'a> FromSql<'a> for NaiveDateTime { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let t = types::timestamp_from_sql(raw)?; - Ok(base() + Duration::microseconds(t)) + base() + .checked_add_signed(Duration::microseconds(t)) + .ok_or_else(|| "value too large to decode".into()) } accepts!(TIMESTAMP); @@ -104,7 +106,10 @@ impl ToSql for DateTime { impl<'a> FromSql<'a> for NaiveDate { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let jd = types::date_from_sql(raw)?; - Ok(base().date() + Duration::days(i64::from(jd))) + base() + .date() + .checked_add_signed(Duration::days(i64::from(jd))) + .ok_or_else(|| "value too large to decode".into()) } accepts!(DATE); diff --git a/tokio-postgres/tests/test/types/chrono_04.rs b/tokio-postgres/tests/test/types/chrono_04.rs index 13c8dc14f..a8e9e5afa 100644 --- a/tokio-postgres/tests/test/types/chrono_04.rs +++ b/tokio-postgres/tests/test/types/chrono_04.rs @@ -1,6 +1,9 @@ use chrono_04::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc}; -use tokio_postgres::types::{Date, Timestamp}; +use std::fmt; +use tokio_postgres::types::{Date, FromSqlOwned, Timestamp}; +use tokio_postgres::Client; +use crate::connect; use crate::types::test_type; #[tokio::test] @@ -153,3 +156,33 @@ async fn test_time_params() { ) .await; } + +#[tokio::test] +async fn test_special_params_without_wrapper() { + async fn assert_overflows(client: &mut Client, val: &str, sql_type: &str) + where + T: FromSqlOwned + fmt::Debug, + { + let err = client + .query_one(&*format!("SELECT {}::{}", val, sql_type), &[]) + .await + .unwrap() + .try_get::<_, T>(0) + .unwrap_err(); + assert_eq!( + err.to_string(), + "error deserializing column 0: value too large to decode" + ); + } + + let mut client = connect("user=postgres").await; + + assert_overflows::>(&mut client, "'-infinity'", "timestamptz").await; + assert_overflows::>(&mut client, "'infinity'", "timestamptz").await; + + assert_overflows::(&mut client, "'-infinity'", "timestamp").await; + assert_overflows::(&mut client, "'infinity'", "timestamp").await; + + assert_overflows::(&mut client, "'-infinity'", "date").await; + assert_overflows::(&mut client, "'infinity'", "date").await; +} From ce7ce310b91bd24d80bd98c4e267d2b14ae4220c Mon Sep 17 00:00:00 2001 From: Joshua Nelson Date: Fri, 31 Jul 2020 20:59:57 -0400 Subject: [PATCH 423/819] Give a more helpful message on error Before: ``` database error: ERROR: insert or update on table "owner_rels" violates foreign key constraint "owner_rels_cid_fkey" ``` After: ``` database error: ERROR: insert or update on table "owner_rels" violates foreign key constraint "owner_rels_cid_fkey" DETAIL: Key (cid)=(4) is not present in table "releases". 
``` --- tokio-postgres/src/error/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 0bcf0c6f9..8a59109a8 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -309,7 +309,14 @@ impl DbError { impl fmt::Display for DbError { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "{}: {}", self.severity, self.message) + write!(fmt, "{}: {}", self.severity, self.message)?; + if let Some(detail) = &self.detail { + write!(fmt, "DETAIL: {}", detail)?; + } + if let Some(hint) = &self.hint { + write!(fmt, "HINT: {}", hint)?; + } + Ok(()) } } From 61f6e3e5c49390ab43035a222f67976814525e41 Mon Sep 17 00:00:00 2001 From: Joshua Nelson Date: Sat, 1 Aug 2020 11:12:22 -0400 Subject: [PATCH 424/819] Add newline before DETAIL and HINT --- tokio-postgres/src/error/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 8a59109a8..f9335cfe7 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -311,10 +311,10 @@ impl fmt::Display for DbError { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{}: {}", self.severity, self.message)?; if let Some(detail) = &self.detail { - write!(fmt, "DETAIL: {}", detail)?; + write!(fmt, "\nDETAIL: {}", detail)?; } if let Some(hint) = &self.hint { - write!(fmt, "HINT: {}", hint)?; + write!(fmt, "\nHINT: {}", hint)?; } Ok(()) } From 26d7b38b039e8f0af3b26718f0ebf80b60bc9102 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Wed, 12 Aug 2020 13:17:41 +0000 Subject: [PATCH 425/819] Update hmac requirement from 0.8 to 0.9 Updates the requirements on [hmac](https://github.com/RustCrypto/MACs) to permit the latest version. 
- [Release notes](https://github.com/RustCrypto/MACs/releases) - [Commits](https://github.com/RustCrypto/MACs/compare/hmac-v0.8.0...hmac-v0.9.0) Signed-off-by: dependabot-preview[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 6c26ac880..400956c18 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -13,7 +13,7 @@ base64 = "0.12" byteorder = "1.0" bytes = "0.5" fallible-iterator = "0.2" -hmac = "0.8" +hmac = "0.9" md5 = "0.7" memchr = "2.0" rand = "0.7" From 391a54aea1afb3e333270b8532c351d70a9c4bd4 Mon Sep 17 00:00:00 2001 From: Juan Aguilar Santillana Date: Sun, 20 Sep 2020 10:37:15 +0000 Subject: [PATCH 426/819] Avoiding get reference of a u16 --- tokio-postgres/src/connect.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index db2ddc85f..2cfc9c6e1 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -25,11 +25,12 @@ where let mut error = None; for (i, host) in config.host.iter().enumerate() { - let port = *config + let port = config .port .get(i) .or_else(|| config.port.get(0)) - .unwrap_or(&5432); + .copied() + .unwrap_or(5432); let hostname = match host { Host::Tcp(host) => &**host, From a2ca75e9c46ac40510572f84790d25f686755453 Mon Sep 17 00:00:00 2001 From: Juan Aguilar Santillana Date: Sun, 20 Sep 2020 10:43:22 +0000 Subject: [PATCH 427/819] Prettify `host` cast as str --- tokio-postgres/src/connect.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 2cfc9c6e1..f529ddbbb 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -33,7 +33,7 @@ where .unwrap_or(5432); let hostname = match host { - Host::Tcp(host) => &**host, + Host::Tcp(host) => host.as_str(), // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] Host::Unix(_) => "", From 4af6fcd911227f1d3cf0ea93d8d7ea0c34fb1195 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 16 Sep 2020 02:43:34 -0400 Subject: [PATCH 428/819] Permit configuring the notice callback Right now the behavior is hardcoded to log any received notices at the info level. Add a `notice_callback` configuration option that permits installing an arbitrary callback to handle any received notices. As discussed in #588. 
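A minimal usage sketch of the new option (connection parameters are placeholders; the callback signature is the one added in the diff below):

```
use postgres::{Config, NoTls};
use std::str::FromStr;

fn main() -> Result<(), postgres::Error> {
    let mut client = Config::from_str("host=localhost user=postgres")?
        .notice_callback(|notice| eprintln!("{}: {}", notice.severity(), notice.message()))
        .connect(NoTls)?;

    // Server-side NOTICEs raised during this statement are delivered to the
    // callback above instead of being logged at the info level.
    client.batch_execute("DO $$BEGIN RAISE NOTICE 'hello'; END$$")?;
    Ok(())
}
```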
--- postgres/src/config.rs | 36 +++++++++++++++++++++++++++++++----- postgres/src/connection.rs | 14 +++++++++++--- postgres/src/test.rs | 18 ++++++++++++++++++ 3 files changed, 60 insertions(+), 8 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b344efdd2..b4d01b1d5 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -4,13 +4,16 @@ use crate::connection::Connection; use crate::Client; +use log::info; use std::fmt; use std::path::Path; use std::str::FromStr; +use std::sync::Arc; use std::time::Duration; use tokio::runtime; #[doc(inline)] pub use tokio_postgres::config::{ChannelBinding, Host, SslMode, TargetSessionAttrs}; +use tokio_postgres::error::DbError; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; @@ -90,6 +93,7 @@ use tokio_postgres::{Error, Socket}; #[derive(Clone)] pub struct Config { config: tokio_postgres::Config, + notice_callback: Arc, } impl fmt::Debug for Config { @@ -109,9 +113,7 @@ impl Default for Config { impl Config { /// Creates a new configuration. pub fn new() -> Config { - Config { - config: tokio_postgres::Config::new(), - } + tokio_postgres::Config::new().into() } /// Sets the user to authenticate with. @@ -307,6 +309,25 @@ impl Config { self.config.get_channel_binding() } + /// Sets the notice callback. + /// + /// This callback will be invoked with the contents of every + /// [`AsyncMessage::Notice`] that is received by the connection. Notices use + /// the same structure as errors, but they are not "errors" per-se. + /// + /// Notices are distinct from notifications, which are instead accessible + /// via the [`Notifications`] API. + /// + /// [`AsyncMessage::Notice`]: tokio_postgres::AsyncMessage::Notice + /// [`Notifications`]: crate::Notifications + pub fn notice_callback(&mut self, f: F) -> &mut Config + where + F: Fn(DbError) + Send + Sync + 'static, + { + self.notice_callback = Arc::new(f); + self + } + /// Opens a connection to a PostgreSQL database. 
pub fn connect(&self, tls: T) -> Result where @@ -323,7 +344,7 @@ impl Config { let (client, connection) = runtime.block_on(self.config.connect(tls))?; - let connection = Connection::new(runtime, connection); + let connection = Connection::new(runtime, connection, self.notice_callback.clone()); Ok(Client::new(connection, client)) } } @@ -338,6 +359,11 @@ impl FromStr for Config { impl From for Config { fn from(config: tokio_postgres::Config) -> Config { - Config { config } + Config { + config, + notice_callback: Arc::new(|notice| { + info!("{}: {}", notice.severity(), notice.message()) + }), + } } } diff --git a/postgres/src/connection.rs b/postgres/src/connection.rs index acea5eca7..a6abb7278 100644 --- a/postgres/src/connection.rs +++ b/postgres/src/connection.rs @@ -1,24 +1,30 @@ use crate::{Error, Notification}; use futures::future; use futures::{pin_mut, Stream}; -use log::info; use std::collections::VecDeque; use std::future::Future; use std::ops::{Deref, DerefMut}; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::runtime::Runtime; +use tokio_postgres::error::DbError; use tokio_postgres::AsyncMessage; pub struct Connection { runtime: Runtime, connection: Pin> + Send>>, notifications: VecDeque, + notice_callback: Arc, } impl Connection { - pub fn new(runtime: Runtime, connection: tokio_postgres::Connection) -> Connection + pub fn new( + runtime: Runtime, + connection: tokio_postgres::Connection, + notice_callback: Arc, + ) -> Connection where S: AsyncRead + AsyncWrite + Unpin + 'static + Send, T: AsyncRead + AsyncWrite + Unpin + 'static + Send, @@ -27,6 +33,7 @@ impl Connection { runtime, connection: Box::pin(ConnectionStream { connection }), notifications: VecDeque::new(), + notice_callback, } } @@ -55,6 +62,7 @@ impl Connection { { let connection = &mut self.connection; let notifications = &mut self.notifications; + let notice_callback = &mut self.notice_callback; self.runtime.block_on({ future::poll_fn(|cx| { let done = loop { @@ -63,7 +71,7 @@ impl Connection { notifications.push_back(notification); } Poll::Ready(Some(Ok(AsyncMessage::Notice(notice)))) => { - info!("{}: {}", notice.severity(), notice.message()); + notice_callback(notice) } Poll::Ready(Some(Ok(_))) => {} Poll::Ready(Some(Err(e))) => return Poll::Ready(Err(e)), diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 557013746..dcf202ef7 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,4 +1,6 @@ use std::io::{Read, Write}; +use std::str::FromStr; +use std::sync::mpsc; use std::thread; use std::time::Duration; use tokio_postgres::error::SqlState; @@ -476,6 +478,22 @@ fn notifications_timeout_iter() { assert_eq!(notifications[1].payload(), "world"); } +#[test] +fn notice_callback() { + let (notice_tx, notice_rx) = mpsc::sync_channel(64); + let mut client = Config::from_str("host=localhost port=5433 user=postgres") + .unwrap() + .notice_callback(move |n| notice_tx.send(n).unwrap()) + .connect(NoTls) + .unwrap(); + + client + .batch_execute("DO $$BEGIN RAISE NOTICE 'custom'; END$$") + .unwrap(); + + assert_eq!(notice_rx.recv().unwrap().message(), "custom"); +} + #[test] fn explicit_close() { let client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); From bb961edcc4f150b961497c4509a713b9625f29d6 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Fri, 25 Sep 2020 15:36:51 -0400 Subject: [PATCH 429/819] Disable chrono's "oldtime" feature to drop time v0.1 The latest version of chrono, v0.4.16, 
permits removing its dependency on the long-deprecated time v0.1 crate by disabling the "oldtime" feature. For backwards compatibility, chrono is leaving the "oldtime" feature on by default, so disabling the "oldtime" feature requires disabling all default features and then re-enabling the one default feature ("clock") that rust-postgres needs. Note that this change does not cause any backwards-compatibility problems for users of rust-postgres. The "oldtime" feature controls only whether `time_v01::Duration` and `chrono::Duration` are the same type and does not affect any of the APIs used by rust-postgres. --- postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 5295f7225..b50653168 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -28,7 +28,7 @@ postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } -chrono-04 = { version = "0.4", package = "chrono", optional = true } +chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = ["clock"], optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } geo-types-04 = { version = "0.4", package = "geo-types", optional = true } geo-types-06 = { version = "0.6", package = "geo-types", optional = true } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index dfc9bb0a9..84cc3bc8a 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -58,7 +58,7 @@ env_logger = "0.7" criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } -chrono-04 = { version = "0.4", package = "chrono" } +chrono-04 = { version = "0.4", package = "chrono", default-features = false } eui48-04 = { version = "0.4", package = "eui48" } geo-types-04 = { version = "0.4", package = "geo-types" } geo-types-06 = { version = "0.6", package = "geo-types" } From 6c506c1c1686b1bf06cd26e0dc43618a743e1b90 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Thu, 1 Oct 2020 13:18:42 +0000 Subject: [PATCH 430/819] Update base64 requirement from 0.12 to 0.13 Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version. - [Release notes](https://github.com/marshallpierce/rust-base64/releases) - [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md) - [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.12.0...v0.13.0) Signed-off-by: dependabot-preview[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 400956c18..0c89a5724 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" [dependencies] -base64 = "0.12" +base64 = "0.13" byteorder = "1.0" bytes = "0.5" fallible-iterator = "0.2" From aadd90854d2e23e64818bf2e899770f4c3bbde32 Mon Sep 17 00:00:00 2001 From: Dan Burkert Date: Fri, 2 Oct 2020 15:56:42 -0700 Subject: [PATCH 431/819] derive Clone, PartialEq and Eq for postgres_types::Json It's a bit unwieldy using the `Json` type in structs without this. 
`Json` is 'just data', so I think it's usually appropriate to consider instances to be cloneable and testable for equivalence. --- postgres-types/src/serde_json_1.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-types/src/serde_json_1.rs b/postgres-types/src/serde_json_1.rs index e5183d3f5..e0fecb496 100644 --- a/postgres-types/src/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -8,7 +8,7 @@ use std::fmt::Debug; use std::io::Read; /// A wrapper type to allow arbitrary `Serialize`/`Deserialize` types to convert to Postgres JSON values. -#[derive(Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct Json(pub T); impl<'a, T> FromSql<'a> for Json From d1f9d6d8020843dbcd4fa6764b5abf33e56ade33 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 15 Oct 2020 21:14:13 -0400 Subject: [PATCH 432/819] fix clippy --- codegen/src/type_gen.rs | 5 +---- postgres-protocol/src/authentication/sasl.rs | 20 ++++---------------- postgres-types/src/lib.rs | 5 +---- postgres-types/src/special.rs | 10 ++-------- 4 files changed, 8 insertions(+), 32 deletions(-) diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 485442a3f..7e92e062a 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -136,10 +136,7 @@ impl<'a> DatParser<'a> { fn peek(&mut self, target: char) -> bool { self.skip_ws(); - match self.it.peek() { - Some((_, ch)) if *ch == target => true, - _ => false, - } + matches!(self.it.peek(), Some((_, ch)) if *ch == target) } fn eof(&mut self) { diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index c99a27a2e..416b4b998 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -330,10 +330,7 @@ impl<'a> Parser<'a> { } fn printable(&mut self) -> io::Result<&'a str> { - self.take_while(|c| match c { - '\x21'..='\x2b' | '\x2d'..='\x7e' => true, - _ => false, - }) + self.take_while(|c| matches!(c, '\x21'..='\x2b' | '\x2d'..='\x7e')) } fn nonce(&mut self) -> io::Result<&'a str> { @@ -343,10 +340,7 @@ impl<'a> Parser<'a> { } fn base64(&mut self) -> io::Result<&'a str> { - self.take_while(|c| match c { - 'a'..='z' | 'A'..='Z' | '0'..='9' | '/' | '+' | '=' => true, - _ => false, - }) + self.take_while(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '/' | '+' | '=')) } fn salt(&mut self) -> io::Result<&'a str> { @@ -356,10 +350,7 @@ impl<'a> Parser<'a> { } fn posit_number(&mut self) -> io::Result { - let n = self.take_while(|c| match c { - '0'..='9' => true, - _ => false, - })?; + let n = self.take_while(|c| matches!(c, '0'..='9'))?; n.parse() .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e)) } @@ -396,10 +387,7 @@ impl<'a> Parser<'a> { } fn value(&mut self) -> io::Result<&'a str> { - self.take_while(|c| match c { - '\0' | '=' | ',' => false, - _ => true, - }) + self.take_while(|c| matches!(c, '\0' | '=' | ',')) } fn server_error(&mut self) -> io::Result> { diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 258b68edc..e9a5846e1 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -144,10 +144,7 @@ const NSEC_PER_USEC: u64 = 1_000; macro_rules! 
accepts { ($($expected:ident),+) => ( fn accepts(ty: &$crate::Type) -> bool { - match *ty { - $($crate::Type::$expected)|+ => true, - _ => false - } + matches!(*ty, $($crate::Type::$expected)|+) } ) } diff --git a/postgres-types/src/special.rs b/postgres-types/src/special.rs index 5a2d7bc08..8579885ef 100644 --- a/postgres-types/src/special.rs +++ b/postgres-types/src/special.rs @@ -75,10 +75,7 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Timestamp { } fn accepts(ty: &Type) -> bool { - match *ty { - Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty) => true, - _ => false, - } + matches!(*ty, Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty)) } } @@ -99,10 +96,7 @@ impl ToSql for Timestamp { } fn accepts(ty: &Type) -> bool { - match *ty { - Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty) => true, - _ => false, - } + matches!(*ty, Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty)) } to_sql_checked!(); From 74439823688cd7140c507a0950b7dffb12fb2f51 Mon Sep 17 00:00:00 2001 From: wuaoxiang Date: Sat, 17 Oct 2020 11:33:21 +0800 Subject: [PATCH 433/819] Make postgres protocol version value more readable --- postgres-protocol/src/message/frontend.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index 8587cd080..5d0a8ff8c 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -260,7 +260,8 @@ where I: IntoIterator, { write_body(buf, |buf| { - buf.put_i32(196_608); + // postgres protocol version 3.0(196608) in bigger-endian + buf.put_i32(0x00_03_00_00); for (key, value) in parameters { write_cstr(key.as_bytes(), buf)?; write_cstr(value.as_bytes(), buf)?; From 2689070d19fbef35f6c61085388cf9bfd6511da5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Oct 2020 09:49:45 -0400 Subject: [PATCH 434/819] Upgrade to tokio 0.3 --- postgres-native-tls/Cargo.toml | 7 ++-- postgres-native-tls/src/lib.rs | 46 ++++++------------------ postgres-openssl/Cargo.toml | 7 ++-- postgres-openssl/src/lib.rs | 34 ++---------------- postgres/Cargo.toml | 2 +- postgres/src/cancel_token.rs | 3 +- postgres/src/config.rs | 3 +- postgres/src/connection.rs | 3 +- postgres/src/notifications.rs | 6 ++-- tokio-postgres/Cargo.toml | 8 ++--- tokio-postgres/benches/bench.rs | 8 ++--- tokio-postgres/src/connect_socket.rs | 10 ++---- tokio-postgres/src/maybe_tls_stream.rs | 45 ++---------------------- tokio-postgres/src/socket.rs | 48 ++------------------------ tokio-postgres/src/tls.rs | 6 ++-- tokio-postgres/tests/test/main.rs | 2 +- tokio-postgres/tests/test/runtime.rs | 2 +- 17 files changed, 49 insertions(+), 191 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index e2d60d1fa..51145bf95 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -16,13 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -bytes = "0.5" futures = "0.3" native-tls = "0.2" -tokio = "0.2" -tokio-tls = "0.3" +tokio = "0.3" +tokio-native-tls = "0.2" tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { version = "0.2", features = ["full"] } +tokio = { version = "0.3", features = ["full"] } postgres = { version = "0.17.0", path = "../postgres" } diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index 207ae6cb2..00413c27b 100644 --- a/postgres-native-tls/src/lib.rs +++ 
b/postgres-native-tls/src/lib.rs @@ -48,13 +48,11 @@ #![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use bytes::{Buf, BufMut}; use std::future::Future; use std::io; -use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; @@ -94,7 +92,7 @@ where /// A `TlsConnect` implementation using the `native-tls` crate. pub struct TlsConnector { - connector: tokio_tls::TlsConnector, + connector: tokio_native_tls::TlsConnector, domain: String, } @@ -102,7 +100,7 @@ impl TlsConnector { /// Creates a new connector configured to connect to the specified domain. pub fn new(connector: native_tls::TlsConnector, domain: &str) -> TlsConnector { TlsConnector { - connector: tokio_tls::TlsConnector::from(connector), + connector: tokio_native_tls::TlsConnector::from(connector), domain: domain.to_string(), } } @@ -129,34 +127,19 @@ where } /// The stream returned by `TlsConnector`. -pub struct TlsStream(tokio_tls::TlsStream); +pub struct TlsStream(tokio_native_tls::TlsStream); impl AsyncRead for TlsStream where S: AsyncRead + AsyncWrite + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.0.prepare_uninitialized_buffer(buf) - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { Pin::new(&mut self.0).poll_read(cx, buf) } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - Pin::new(&mut self.0).poll_read_buf(cx, buf) - } } impl AsyncWrite for TlsStream @@ -178,17 +161,6 @@ where fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.0).poll_shutdown(cx) } - - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - Pin::new(&mut self.0).poll_write_buf(cx, buf) - } } impl tls::TlsStream for TlsStream @@ -196,7 +168,9 @@ where S: AsyncRead + AsyncWrite + Unpin, { fn channel_binding(&self) -> ChannelBinding { - // FIXME https://github.com/tokio-rs/tokio/issues/1383 - ChannelBinding::none() + match self.0.get_ref().tls_server_end_point().ok().flatten() { + Some(buf) => ChannelBinding::tls_server_end_point(buf), + None => ChannelBinding::none(), + } } } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index a3c9f65fa..e022a4f8a 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -16,13 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -bytes = "0.5" futures = "0.3" openssl = "0.10" -tokio = "0.2" -tokio-openssl = "0.4" +tokio = "0.3" +tokio-openssl = "0.5" tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { version = "0.2", features = ["full"] } +tokio = { version = "0.3", features = ["full"] } postgres = { version = "0.17.0", path = "../postgres" } diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 23a653c60..3780f2082 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -42,7 +42,6 @@ #![doc(html_root_url = "https://docs.rs/postgres-openssl/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use bytes::{Buf, BufMut}; 
#[cfg(feature = "runtime")] use openssl::error::ErrorStack; use openssl::hash::MessageDigest; @@ -53,12 +52,11 @@ use openssl::ssl::{ConnectConfiguration, SslRef}; use std::fmt::Debug; use std::future::Future; use std::io; -use std::mem::MaybeUninit; use std::pin::Pin; #[cfg(feature = "runtime")] use std::sync::Arc; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio_openssl::{HandshakeError, SslStream}; use tokio_postgres::tls; #[cfg(feature = "runtime")] @@ -157,28 +155,13 @@ impl AsyncRead for TlsStream where S: AsyncRead + AsyncWrite + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.0.prepare_uninitialized_buffer(buf) - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { Pin::new(&mut self.0).poll_read(cx, buf) } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - Pin::new(&mut self.0).poll_read_buf(cx, buf) - } } impl AsyncWrite for TlsStream @@ -200,17 +183,6 @@ where fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.0).poll_shutdown(cx) } - - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - Pin::new(&mut self.0).poll_write_buf(cx, buf) - } } impl tls::TlsStream for TlsStream diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 3652ac35d..db9104453 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -36,7 +36,7 @@ fallible-iterator = "0.2" futures = "0.3" tokio-postgres = { version = "0.5.5", path = "../tokio-postgres" } -tokio = { version = "0.2", features = ["rt-core", "time"] } +tokio = { version = "0.3", features = ["rt", "time"] } log = "0.4" [dev-dependencies] diff --git a/postgres/src/cancel_token.rs b/postgres/src/cancel_token.rs index f140e60e2..be24edcc8 100644 --- a/postgres/src/cancel_token.rs +++ b/postgres/src/cancel_token.rs @@ -26,9 +26,8 @@ impl CancelToken { where T: MakeTlsConnect, { - runtime::Builder::new() + runtime::Builder::new_current_thread() .enable_all() - .basic_scheduler() .build() .unwrap() // FIXME don't unwrap .block_on(self.0.cancel_query(tls)) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b4d01b1d5..249e6b44e 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -336,9 +336,8 @@ impl Config { T::Stream: Send, >::Future: Send, { - let mut runtime = runtime::Builder::new() + let runtime = runtime::Builder::new_current_thread() .enable_all() - .basic_scheduler() .build() .unwrap(); // FIXME don't unwrap diff --git a/postgres/src/connection.rs b/postgres/src/connection.rs index a6abb7278..bc8564f54 100644 --- a/postgres/src/connection.rs +++ b/postgres/src/connection.rs @@ -45,7 +45,8 @@ impl Connection { where F: FnOnce() -> T, { - self.runtime.enter(f) + let _guard = self.runtime.enter(); + f() } pub fn block_on(&mut self, future: F) -> Result diff --git a/postgres/src/notifications.rs b/postgres/src/notifications.rs index e8c681548..241c95a5d 100644 --- a/postgres/src/notifications.rs +++ b/postgres/src/notifications.rs @@ -6,7 +6,7 @@ use fallible_iterator::FallibleIterator; use futures::{ready, FutureExt}; use std::task::Poll; use std::time::Duration; -use tokio::time::{self, Delay, Instant}; +use tokio::time::{self, Instant, Sleep}; /// Notifications from a PostgreSQL backend. 
pub struct Notifications<'a> { @@ -64,7 +64,7 @@ impl<'a> Notifications<'a> { /// This iterator may start returning `Some` after previously returning `None` if more notifications are received. pub fn timeout_iter(&mut self, timeout: Duration) -> TimeoutIter<'_> { TimeoutIter { - delay: self.connection.enter(|| time::delay_for(timeout)), + delay: self.connection.enter(|| time::sleep(timeout)), timeout, connection: self.connection.as_ref(), } @@ -124,7 +124,7 @@ impl<'a> FallibleIterator for BlockingIter<'a> { /// A time-limited blocking iterator over pending notifications. pub struct TimeoutIter<'a> { connection: ConnectionRef<'a>, - delay: Delay, + delay: Sleep, timeout: Duration, } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 84cc3bc8a..aea6aa5ca 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -25,7 +25,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio/dns", "tokio/net", "tokio/time"] +runtime = ["tokio/net", "tokio/time"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] @@ -49,11 +49,11 @@ pin-project-lite = "0.1" phf = "0.8" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-types = { version = "0.1.2", path = "../postgres-types" } -tokio = { version = "0.2", features = ["io-util"] } -tokio-util = { version = "0.3", features = ["codec"] } +tokio = { version = "0.3", features = ["io-util"] } +tokio-util = { version = "0.4", features = ["codec"] } [dev-dependencies] -tokio = { version = "0.2", features = ["full"] } +tokio = { version = "0.3", features = ["full"] } env_logger = "0.7" criterion = "0.3" diff --git a/tokio-postgres/benches/bench.rs b/tokio-postgres/benches/bench.rs index 315bea8e4..fececa2b5 100644 --- a/tokio-postgres/benches/bench.rs +++ b/tokio-postgres/benches/bench.rs @@ -7,7 +7,7 @@ use tokio::runtime::Runtime; use tokio_postgres::{Client, NoTls}; fn setup() -> (Client, Runtime) { - let mut runtime = Runtime::new().unwrap(); + let runtime = Runtime::new().unwrap(); let (client, conn) = runtime .block_on(tokio_postgres::connect( "host=localhost port=5433 user=postgres", @@ -19,7 +19,7 @@ fn setup() -> (Client, Runtime) { } fn query_prepared(c: &mut Criterion) { - let (client, mut runtime) = setup(); + let (client, runtime) = setup(); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("runtime_block_on", move |b| { b.iter(|| { @@ -29,13 +29,13 @@ fn query_prepared(c: &mut Criterion) { }) }); - let (client, mut runtime) = setup(); + let (client, runtime) = setup(); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("executor_block_on", move |b| { b.iter(|| executor::block_on(client.query(&statement, &[&1i64])).unwrap()) }); - let (client, mut runtime) = setup(); + let (client, runtime) = setup(); let client = Arc::new(client); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("spawned", move |b| { diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 2d56a2ed5..145eb7dce 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -12,19 +12,15 @@ pub(crate) async fn connect_socket( host: &Host, port: u16, connect_timeout: Option, - keepalives: bool, - keepalives_idle: Duration, + _keepalives: bool, + _keepalives_idle: Duration, ) -> Result { match host { 
Host::Tcp(host) => { let socket = connect_with_timeout(TcpStream::connect((&**host, port)), connect_timeout).await?; socket.set_nodelay(true).map_err(Error::connect)?; - if keepalives { - socket - .set_keepalive(Some(keepalives_idle)) - .map_err(Error::connect)?; - } + // FIXME support keepalives? Ok(Socket::new_tcp(socket)) } diff --git a/tokio-postgres/src/maybe_tls_stream.rs b/tokio-postgres/src/maybe_tls_stream.rs index 652236ee8..73b0c4721 100644 --- a/tokio-postgres/src/maybe_tls_stream.rs +++ b/tokio-postgres/src/maybe_tls_stream.rs @@ -1,10 +1,8 @@ use crate::tls::{ChannelBinding, TlsStream}; -use bytes::{Buf, BufMut}; use std::io; -use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; pub enum MaybeTlsStream { Raw(S), @@ -16,38 +14,16 @@ where S: AsyncRead + Unpin, T: AsyncRead + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - match self { - MaybeTlsStream::Raw(s) => s.prepare_uninitialized_buffer(buf), - MaybeTlsStream::Tls(s) => s.prepare_uninitialized_buffer(buf), - } - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { match &mut *self { MaybeTlsStream::Raw(s) => Pin::new(s).poll_read(cx, buf), MaybeTlsStream::Tls(s) => Pin::new(s).poll_read(cx, buf), } } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - B: BufMut, - { - match &mut *self { - MaybeTlsStream::Raw(s) => Pin::new(s).poll_read_buf(cx, buf), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_read_buf(cx, buf), - } - } } impl AsyncWrite for MaybeTlsStream @@ -79,21 +55,6 @@ where MaybeTlsStream::Tls(s) => Pin::new(s).poll_shutdown(cx), } } - - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - B: Buf, - { - match &mut *self { - MaybeTlsStream::Raw(s) => Pin::new(s).poll_write_buf(cx, buf), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_write_buf(cx, buf), - } - } } impl TlsStream for MaybeTlsStream diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs index cc7149674..966510d56 100644 --- a/tokio-postgres/src/socket.rs +++ b/tokio-postgres/src/socket.rs @@ -1,9 +1,7 @@ -use bytes::{Buf, BufMut}; use std::io; -use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; @@ -33,41 +31,17 @@ impl Socket { } impl AsyncRead for Socket { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - match &self.0 { - Inner::Tcp(s) => s.prepare_uninitialized_buffer(buf), - #[cfg(unix)] - Inner::Unix(s) => s.prepare_uninitialized_buffer(buf), - } - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { match &mut self.0 { Inner::Tcp(s) => Pin::new(s).poll_read(cx, buf), #[cfg(unix)] Inner::Unix(s) => Pin::new(s).poll_read(cx, buf), } } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - B: BufMut, - { - match &mut self.0 { - Inner::Tcp(s) => Pin::new(s).poll_read_buf(cx, buf), - #[cfg(unix)] - Inner::Unix(s) => Pin::new(s).poll_read_buf(cx, buf), - } - } } impl AsyncWrite 
for Socket { @@ -98,20 +72,4 @@ impl AsyncWrite for Socket { Inner::Unix(s) => Pin::new(s).poll_shutdown(cx), } } - - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - B: Buf, - { - match &mut self.0 { - Inner::Tcp(s) => Pin::new(s).poll_write_buf(cx, buf), - #[cfg(unix)] - Inner::Unix(s) => Pin::new(s).poll_write_buf(cx, buf), - } - } } diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 4e852d3f9..963daed18 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -5,7 +5,7 @@ use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use std::{fmt, io}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; pub(crate) mod private { pub struct ForcePrivateApi; @@ -125,8 +125,8 @@ impl AsyncRead for NoTlsStream { fn poll_read( self: Pin<&mut Self>, _: &mut Context<'_>, - _: &mut [u8], - ) -> Poll> { + _: &mut ReadBuf<'_>, + ) -> Poll> { match *self {} } } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index b01037edc..bf6d72d3e 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -308,7 +308,7 @@ async fn cancel_query_raw() { let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let cancel_token = client.cancel_token(); let cancel = cancel_token.cancel_query_raw(socket, NoTls); - let cancel = time::delay_for(Duration::from_millis(100)).then(|()| cancel); + let cancel = time::sleep(Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index e07aa4a63..b088d6c98 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -72,7 +72,7 @@ async fn cancel_query() { let cancel_token = client.cancel_token(); let cancel = cancel_token.cancel_query(NoTls); - let cancel = time::delay_for(Duration::from_millis(100)).then(|()| cancel); + let cancel = time::sleep(Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); From b30182962f6aa354b233f8a229c1b87c9b196d20 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Oct 2020 09:52:46 -0400 Subject: [PATCH 435/819] bump ci image --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f3dae7101..8038a2c0f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,7 +22,7 @@ version: 2 jobs: build: docker: - - image: rust:1.41.0 + - image: rust:1.45.0 environment: RUSTFLAGS: -D warnings - image: sfackler/rust-postgres-test:6 From 3f350680d57a9993ac5306bf52be7f17efe92d58 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Oct 2020 14:53:34 -0400 Subject: [PATCH 436/819] Release postgres-protocol v0.5.3 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 3cff60c51..b476fcabf 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.3 - 2020-10-17 + +### Changed + +* Upgraded `base64` and `hmac`. 
+ ## v0.5.2 - 2020-07-06 ### Changed diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 0c89a5724..cb7657eea 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.5.2" +version = "0.5.3" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" From 7fd04e7a21cc5ae9832f24a4b28292b40fbcaa8f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Oct 2020 15:00:47 -0400 Subject: [PATCH 437/819] Release postgres-types v0.1.3 --- postgres-types/CHANGELOG.md | 10 ++++++++++ postgres-types/Cargo.toml | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index ddbdc6844..1c267923c 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,15 @@ # Change Log +## v0.1.3 - 2020-10-17 + +### Added + +* Implemented `Clone`, `PartialEq`, and `Eq` for `Json`. + +### Fixed + +* Checked for overflow in `NaiveDate` and `NaiveDateTime` conversions. + ## v0.1.2 - 2020-07-03 ### Added diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index b50653168..c5e706f6e 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.1.2" +version = "0.1.3" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 12e2f03b53636b9348e07c5d83191a7c7805a0f4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Oct 2020 15:05:09 -0400 Subject: [PATCH 438/819] Release tokio-postgres v0.6.0 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/Cargo.toml | 2 +- tokio-postgres/CHANGELOG.md | 7 +++++++ tokio-postgres/Cargo.toml | 2 +- 5 files changed, 11 insertions(+), 4 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 51145bf95..21b21c9eb 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -20,7 +20,7 @@ futures = "0.3" native-tls = "0.2" tokio = "0.3" tokio-native-tls = "0.2" -tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.6.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = { version = "0.3", features = ["full"] } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index e022a4f8a..d9bff72b8 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -20,7 +20,7 @@ futures = "0.3" openssl = "0.10" tokio = "0.3" tokio-openssl = "0.5" -tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.6.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = { version = "0.3", features = ["full"] } diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index db9104453..24dc211eb 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -34,7 +34,7 @@ with-time-0_2 = ["tokio-postgres/with-time-0_2"] bytes = "0.5" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.5.5", path = "../tokio-postgres" } +tokio-postgres = { version = "0.6.0", path = "../tokio-postgres" } tokio = { version = "0.3", features = ["rt", "time"] } log = "0.4" diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index a02c461a5..95fd3ed6a 100644 --- a/tokio-postgres/CHANGELOG.md +++ 
b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,12 @@ # Change Log +## v0.6.0 - 2020-10-17 + +### Changed + +* Upgraded to tokio `0.3`. +* Added the detail and hint fields to `DbError`'s `Display` implementation. + ## v0.5.5 - 2020-07-03 ### Added diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index aea6aa5ca..63b216488 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.5.5" +version = "0.6.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From cc77ad18d5fc6059e565dc99fce5ec674c4138de Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Oct 2020 15:16:56 -0400 Subject: [PATCH 439/819] Release postgres v0.18.0 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/CHANGELOG.md | 14 ++++++++++++++ postgres/Cargo.toml | 2 +- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 21b21c9eb..b73a3bcca 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "0.6.0", path = "../tokio-postgres", default-featur [dev-dependencies] tokio = { version = "0.3", features = ["full"] } -postgres = { version = "0.17.0", path = "../postgres" } +postgres = { version = "0.18.0", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index d9bff72b8..bd5c64faa 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "0.6.0", path = "../tokio-postgres", default-featur [dev-dependencies] tokio = { version = "0.3", features = ["full"] } -postgres = { version = "0.17.0", path = "../postgres" } +postgres = { version = "0.18.0", path = "../postgres" } diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 965bd2b09..c4e6b948d 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,19 @@ # Change Log +## v0.18.0 - 2020-10-17 + +### Changed + +* Upgraded to `tokio-postgres` 0.6. + +### Added + +* Added `Config::notice_callback`, which can be used to provide a custom callback for notices. + +### Fixed + +* Fixed client shutdown to explicitly terminate the database session. + ## v0.17.5 - 2020-07-19 ### Fixed diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 24dc211eb..da4c6259f 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.17.5" +version = "0.18.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 473562175fc685688b4364d121703ee82c7ab983 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Oct 2020 15:18:28 -0400 Subject: [PATCH 440/819] Release postgres-native-tls v0.4.0 --- postgres-native-tls/CHANGELOG.md | 6 ++++++ postgres-native-tls/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index fd9180b3d..a840592e3 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.4.0 - 2020-10-17 + +### Changed + +* Upgraded to `tokio-postgres` 0.6. 
+ ## v0.3.0 - 2019-12-23 ### Changed diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index b73a3bcca..81dcf29e3 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-native-tls" -version = "0.3.0" +version = "0.4.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From ec1c4a5c6533109e42adcf1fa9d5f60cbe1e2ba8 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Oct 2020 15:20:32 -0400 Subject: [PATCH 441/819] Release postgres-openssl v0.4.0 --- postgres-openssl/CHANGELOG.md | 6 ++++++ postgres-openssl/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md index 45a1bd065..f740828c5 100644 --- a/postgres-openssl/CHANGELOG.md +++ b/postgres-openssl/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.4.0 - 2020-10-17 + +### Changed + +* Upgraded to `tokio-postgres` 0.6. + ## v0.3.0 - 2019-12-23 ### Changed diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index bd5c64faa..c825e929d 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-openssl" -version = "0.3.0" +version = "0.4.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 6561d8129a83fcb2037c4fd5546b56dc9676ae2a Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Oct 2020 13:23:29 +0000 Subject: [PATCH 442/819] Update env_logger requirement from 0.7 to 0.8 Updates the requirements on [env_logger](https://github.com/env-logger-rs/env_logger) to permit the latest version. - [Release notes](https://github.com/env-logger-rs/env_logger/releases) - [Changelog](https://github.com/env-logger-rs/env_logger/blob/master/CHANGELOG.md) - [Commits](https://github.com/env-logger-rs/env_logger/compare/v0.7.0...v0.8.1) Signed-off-by: dependabot-preview[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 63b216488..883f8e1ee 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -54,7 +54,7 @@ tokio-util = { version = "0.4", features = ["codec"] } [dev-dependencies] tokio = { version = "0.3", features = ["full"] } -env_logger = "0.7" +env_logger = "0.8" criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } From a2ab2aab5b9db832d504e13b5e4eb51e02d0717e Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Oct 2020 13:24:08 +0000 Subject: [PATCH 443/819] Update hmac requirement from 0.9 to 0.10 Updates the requirements on [hmac](https://github.com/RustCrypto/MACs) to permit the latest version. 
- [Release notes](https://github.com/RustCrypto/MACs/releases) - [Commits](https://github.com/RustCrypto/MACs/compare/hmac-v0.9.0...hmac-v0.10.1) Signed-off-by: dependabot-preview[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index cb7657eea..5c7e11478 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -13,7 +13,7 @@ base64 = "0.13" byteorder = "1.0" bytes = "0.5" fallible-iterator = "0.2" -hmac = "0.9" +hmac = "0.10" md5 = "0.7" memchr = "2.0" rand = "0.7" From db90323b0806f6fb00f5012b2e80fe8658f19224 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 19 Oct 2020 19:58:41 -0400 Subject: [PATCH 444/819] Make postgres::Client Send again Closes #677 --- postgres/src/connection.rs | 4 ++-- postgres/src/test.rs | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/postgres/src/connection.rs b/postgres/src/connection.rs index bc8564f54..453eef3a6 100644 --- a/postgres/src/connection.rs +++ b/postgres/src/connection.rs @@ -16,14 +16,14 @@ pub struct Connection { runtime: Runtime, connection: Pin> + Send>>, notifications: VecDeque, - notice_callback: Arc, + notice_callback: Arc, } impl Connection { pub fn new( runtime: Runtime, connection: tokio_postgres::Connection, - notice_callback: Arc, + notice_callback: Arc, ) -> Connection where S: AsyncRead + AsyncWrite + Unpin + 'static + Send, diff --git a/postgres/src/test.rs b/postgres/src/test.rs index dcf202ef7..0fd404574 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -499,3 +499,12 @@ fn explicit_close() { let client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); client.close().unwrap(); } + +#[test] +fn check_send() { + fn is_send() {} + + is_send::(); + is_send::(); + is_send::>(); +} From 5e065c36cdcaa8741c712833a947670b46e952c3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 19 Oct 2020 20:02:01 -0400 Subject: [PATCH 445/819] Release v0.18.1 --- postgres/CHANGELOG.md | 6 ++++++ postgres/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index c4e6b948d..2c9443986 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.18.1 - 20201-10-19 + +### Fixed + +* Restored the `Send` implementation for `Client`. 
+ ## v0.18.0 - 2020-10-17 ### Changed diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index da4c6259f..b84becd19 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.18.0" +version = "0.18.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 0eab5fad70a0af4ce754560661cf263f5a847193 Mon Sep 17 00:00:00 2001 From: Bernardo Uriarte Blanco Date: Mon, 26 Oct 2020 20:59:28 +0100 Subject: [PATCH 446/819] make iterators from `&dyn ToSql` or `T: ToSql` work as parameters --- postgres-types/src/lib.rs | 18 +++++++++++++++ tokio-postgres/src/binary_copy.rs | 7 ++++-- tokio-postgres/src/bind.rs | 7 +++--- tokio-postgres/src/client.rs | 11 +++++---- tokio-postgres/src/query.rs | 38 +++++++++++++++++++++---------- 5 files changed, 60 insertions(+), 21 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index e9a5846e1..c8d65e77a 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -951,3 +951,21 @@ fn downcast(len: usize) -> Result> { Ok(len as i32) } } + +/// A helper trait to be able create a parameters iterator from `&dyn ToSql` or `T: ToSql` +pub trait BorrowToSql { + /// Get a reference to a `ToSql` trait object + fn borrow_to_sql(&self) -> &dyn ToSql; +} + +impl BorrowToSql for &dyn ToSql { + fn borrow_to_sql(&self) -> &dyn ToSql { + *self + } +} + +impl BorrowToSql for T { + fn borrow_to_sql(&self) -> &dyn ToSql { + self + } +} diff --git a/tokio-postgres/src/binary_copy.rs b/tokio-postgres/src/binary_copy.rs index 231f202d8..20064c728 100644 --- a/tokio-postgres/src/binary_copy.rs +++ b/tokio-postgres/src/binary_copy.rs @@ -6,6 +6,7 @@ use byteorder::{BigEndian, ByteOrder}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use futures::{ready, SinkExt, Stream}; use pin_project_lite::pin_project; +use postgres_types::BorrowToSql; use std::convert::TryFrom; use std::io; use std::io::Cursor; @@ -58,9 +59,10 @@ impl BinaryCopyInWriter { /// # Panics /// /// Panics if the number of values provided does not match the number expected. - pub async fn write_raw<'a, I>(self: Pin<&mut Self>, values: I) -> Result<(), Error> + pub async fn write_raw(self: Pin<&mut Self>, values: I) -> Result<(), Error> where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let mut this = self.project(); @@ -79,6 +81,7 @@ impl BinaryCopyInWriter { let idx = this.buf.len(); this.buf.put_i32(0); let len = match value + .borrow_to_sql() .to_sql_checked(type_, this.buf) .map_err(|e| Error::to_sql(e, i))? 
{ diff --git a/tokio-postgres/src/bind.rs b/tokio-postgres/src/bind.rs index 69823a9ab..9c5c49218 100644 --- a/tokio-postgres/src/bind.rs +++ b/tokio-postgres/src/bind.rs @@ -1,7 +1,7 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::types::ToSql; +use crate::types::BorrowToSql; use crate::{query, Error, Portal, Statement}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -10,13 +10,14 @@ use std::sync::Arc; static NEXT_ID: AtomicUsize = AtomicUsize::new(0); -pub async fn bind<'a, I>( +pub async fn bind( client: &Arc, statement: Statement, params: I, ) -> Result where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let name = format!("p{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index e19caae83..ecf3ea601 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -20,6 +20,7 @@ use futures::channel::mpsc; use futures::{future, pin_mut, ready, StreamExt, TryStreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; +use postgres_types::BorrowToSql; use std::collections::HashMap; use std::fmt; use std::sync::Arc; @@ -342,10 +343,11 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub async fn query_raw<'a, T, I>(&self, statement: &T, params: I) -> Result + pub async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let statement = statement.__convert().into_statement(self).await?; @@ -391,10 +393,11 @@ impl Client { /// Panics if the number of parameters provided does not match the number expected. 
/// /// [`execute`]: #method.execute - pub async fn execute_raw<'a, T, I>(&self, statement: &T, params: I) -> Result + pub async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let statement = statement.__convert().into_statement(self).await?; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 7792f0a8a..2245b982a 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -1,7 +1,7 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::types::{IsNull, ToSql}; +use crate::types::{BorrowToSql, IsNull}; use crate::{Error, Portal, Row, Statement}; use bytes::{Bytes, BytesMut}; use futures::{ready, Stream}; @@ -9,17 +9,28 @@ use log::{debug, log_enabled, Level}; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; +use std::fmt; use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; -pub async fn query<'a, I>( +struct BorrowToSqlParamsDebug<'a, T: BorrowToSql>(&'a [T]); +impl<'a, T: BorrowToSql> std::fmt::Debug for BorrowToSqlParamsDebug<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.0.iter().map(|x| x.borrow_to_sql())) + .finish() + } +} + +pub async fn query( client: &InnerClient, statement: Statement, params: I, ) -> Result where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let buf = if log_enabled!(Level::Debug) { @@ -27,7 +38,7 @@ where debug!( "executing statement {} with parameters: {:?}", statement.name(), - params, + BorrowToSqlParamsDebug(params.as_slice()), ); encode(client, &statement, params)? } else { @@ -61,13 +72,14 @@ pub async fn query_portal( }) } -pub async fn execute<'a, I>( +pub async fn execute( client: &InnerClient, statement: Statement, params: I, ) -> Result where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let buf = if log_enabled!(Level::Debug) { @@ -75,7 +87,7 @@ where debug!( "executing statement {} with parameters: {:?}", statement.name(), - params, + BorrowToSqlParamsDebug(params.as_slice()), ); encode(client, &statement, params)? 
} else { @@ -114,9 +126,10 @@ async fn start(client: &InnerClient, buf: Bytes) -> Result { Ok(responses) } -pub fn encode<'a, I>(client: &InnerClient, statement: &Statement, params: I) -> Result +pub fn encode(client: &InnerClient, statement: &Statement, params: I) -> Result where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { client.with_buf(|buf| { @@ -127,14 +140,15 @@ where }) } -pub fn encode_bind<'a, I>( +pub fn encode_bind( statement: &Statement, params: I, portal: &str, buf: &mut BytesMut, ) -> Result<(), Error> where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let params = params.into_iter(); @@ -152,7 +166,7 @@ where statement.name(), Some(1), params.zip(statement.params()).enumerate(), - |(idx, (param, ty)), buf| match param.to_sql_checked(ty, buf) { + |(idx, (param, ty)), buf| match param.borrow_to_sql().to_sql_checked(ty, buf) { Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), Err(e) => { From 0e494a08a982bcd743c71e05f366fb51da86e075 Mon Sep 17 00:00:00 2001 From: Bernardo Uriarte Blanco Date: Tue, 27 Oct 2020 18:08:23 +0100 Subject: [PATCH 447/819] change rest of `*_raw` methods to use `BorrowToSql` --- postgres/src/binary_copy.rs | 7 +++--- postgres/src/client.rs | 9 ++++---- postgres/src/generic_client.rs | 17 +++++++++------ postgres/src/transaction.rs | 7 +++--- tokio-postgres/src/generic_client.rs | 32 +++++++++++++++++----------- tokio-postgres/src/transaction.rs | 17 +++++++++------ 6 files changed, 52 insertions(+), 37 deletions(-) diff --git a/postgres/src/binary_copy.rs b/postgres/src/binary_copy.rs index 259347195..98ae666b7 100644 --- a/postgres/src/binary_copy.rs +++ b/postgres/src/binary_copy.rs @@ -1,7 +1,7 @@ //! Utilities for working with the PostgreSQL binary copy format. use crate::connection::ConnectionRef; -use crate::types::{ToSql, Type}; +use crate::types::{BorrowToSql, ToSql, Type}; use crate::{CopyInWriter, CopyOutReader, Error}; use fallible_iterator::FallibleIterator; use futures::StreamExt; @@ -46,9 +46,10 @@ impl<'a> BinaryCopyInWriter<'a> { /// # Panics /// /// Panics if the number of values provided does not match the number expected. - pub fn write_raw<'b, I>(&mut self, values: I) -> Result<(), Error> + pub fn write_raw(&mut self, values: I) -> Result<(), Error> where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.connection diff --git a/postgres/src/client.rs b/postgres/src/client.rs index dcb9c72d4..8ea038da4 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -5,7 +5,7 @@ use crate::{ }; use std::task::Poll; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; -use tokio_postgres::types::{ToSql, Type}; +use tokio_postgres::types::{BorrowToSql, ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage, Socket}; /// A synchronous PostgreSQL client. @@ -227,7 +227,7 @@ impl Client { /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; /// /// let baz = true; - /// let mut it = client.query_raw("SELECT foo FROM bar WHERE baz = $1", iter::once(&baz as _))?; + /// let mut it = client.query_raw("SELECT foo FROM bar WHERE baz = $1", iter::once(baz))?; /// /// while let Some(row) = it.next()? 
{ /// let foo: i32 = row.get("foo"); @@ -263,10 +263,11 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + pub fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let stream = self diff --git a/postgres/src/generic_client.rs b/postgres/src/generic_client.rs index 42a466df6..12f07465d 100644 --- a/postgres/src/generic_client.rs +++ b/postgres/src/generic_client.rs @@ -1,4 +1,4 @@ -use crate::types::{ToSql, Type}; +use crate::types::{BorrowToSql, ToSql, Type}; use crate::{ Client, CopyInWriter, CopyOutReader, Error, Row, RowIter, SimpleQueryMessage, Statement, ToStatement, Transaction, @@ -37,10 +37,11 @@ pub trait GenericClient: private::Sealed { T: ?Sized + ToStatement; /// Like `Client::query_raw`. - fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator; /// Like `Client::prepare`. @@ -104,10 +105,11 @@ impl GenericClient for Client { self.query_opt(query, params) } - fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.query_raw(query, params) @@ -183,10 +185,11 @@ impl GenericClient for Transaction<'_> { self.query_opt(query, params) } - fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.query_raw(query, params) diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 3213b7c1f..17c49c406 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,6 +1,6 @@ use crate::connection::ConnectionRef; use crate::{CancelToken, CopyInWriter, CopyOutReader, Portal, RowIter, Statement, ToStatement}; -use tokio_postgres::types::{ToSql, Type}; +use tokio_postgres::types::{BorrowToSql, ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; /// A representation of a PostgreSQL database transaction. @@ -102,10 +102,11 @@ impl<'a> Transaction<'a> { } /// Like `Client::query_raw`. - pub fn query_raw<'b, T, I>(&mut self, query: &T, params: I) -> Result, Error> + pub fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let stream = self diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index ad318e864..df2c6b842 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -1,5 +1,5 @@ use crate::query::RowStream; -use crate::types::{ToSql, Type}; +use crate::types::{BorrowToSql, ToSql, Type}; use crate::{Client, Error, Row, Statement, ToStatement, Transaction}; use async_trait::async_trait; @@ -18,10 +18,11 @@ pub trait GenericClient: private::Sealed { T: ?Sized + ToStatement + Sync + Send; /// Like `Client::execute_raw`. 
- async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator; /// Like `Client::query`. @@ -48,10 +49,11 @@ pub trait GenericClient: private::Sealed { T: ?Sized + ToStatement + Sync + Send; /// Like `Client::query_raw`. - async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator; /// Like `Client::prepare`. @@ -79,10 +81,11 @@ impl GenericClient for Client { self.execute(query, params).await } - async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator, { self.execute_raw(statement, params).await @@ -117,10 +120,11 @@ impl GenericClient for Client { self.query_opt(statement, params).await } - async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator, { self.query_raw(statement, params).await @@ -155,10 +159,11 @@ impl GenericClient for Transaction<'_> { self.execute(query, params).await } - async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator, { self.execute_raw(statement, params).await @@ -193,10 +198,11 @@ impl GenericClient for Transaction<'_> { self.query_opt(statement, params).await } - async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator, { self.query_raw(statement, params).await diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 38fdf7cea..45e9cc3aa 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -5,7 +5,7 @@ use crate::query::RowStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; use crate::tls::TlsConnect; -use crate::types::{ToSql, Type}; +use crate::types::{BorrowToSql, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; use crate::{ @@ -139,10 +139,11 @@ impl<'a> Transaction<'a> { } /// Like `Client::query_raw`. - pub async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + pub async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.client.query_raw(statement, params).await @@ -161,10 +162,11 @@ impl<'a> Transaction<'a> { } /// Like `Client::execute_iter`. 
- pub async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + pub async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.client.execute_raw(statement, params).await @@ -192,10 +194,11 @@ impl<'a> Transaction<'a> { /// A maximally flexible version of [`bind`]. /// /// [`bind`]: #method.bind - pub async fn bind_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + pub async fn bind_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let statement = statement.__convert().into_statement(&self.client).await?; From 42dcfb584afea518e5b487e352c8ba87b75c571f Mon Sep 17 00:00:00 2001 From: Bernardo Uriarte Blanco Date: Tue, 27 Oct 2020 20:23:57 +0100 Subject: [PATCH 448/819] seal BorrowToSql --- postgres-types/src/lib.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index c8d65e77a..6542fc592 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -108,6 +108,7 @@ #![doc(html_root_url = "https://docs.rs/postgres-types/0.1")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] +use crate::sealed::Sealed; use fallible_iterator::FallibleIterator; use postgres_protocol::types::{self, ArrayDimension}; use std::any::type_name; @@ -952,18 +953,29 @@ fn downcast(len: usize) -> Result> { } } -/// A helper trait to be able create a parameters iterator from `&dyn ToSql` or `T: ToSql` -pub trait BorrowToSql { +mod sealed { + pub trait Sealed {} +} + +/// A helper trait used internally by Rust-Postgres +/// to be able create a parameters iterator from `&dyn ToSql` or `T: ToSql`. +/// +/// /// This cannot be implemented outside of this crate. +pub trait BorrowToSql: sealed::Sealed { /// Get a reference to a `ToSql` trait object fn borrow_to_sql(&self) -> &dyn ToSql; } +impl Sealed for &dyn ToSql {} + impl BorrowToSql for &dyn ToSql { fn borrow_to_sql(&self) -> &dyn ToSql { *self } } +impl Sealed for T {} + impl BorrowToSql for T { fn borrow_to_sql(&self) -> &dyn ToSql { self From 996f0633802267ce45cf7170d2c8a935f22b534e Mon Sep 17 00:00:00 2001 From: Bernardo Uriarte Blanco Date: Tue, 27 Oct 2020 20:31:15 +0100 Subject: [PATCH 449/819] map to convert to `&dyn ToSql` is not longer necessary --- postgres/src/client.rs | 2 +- tokio-postgres/src/client.rs | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 8ea038da4..f5637cdbd 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -253,7 +253,7 @@ impl Client { /// ]; /// let mut it = client.query_raw( /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", - /// params.iter().map(|p| p as &dyn ToSql), + /// params, /// )?; /// /// while let Some(row) = it.next()? { diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index ecf3ea601..359a7cd16 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -318,9 +318,6 @@ impl Client { /// /// # Examples /// - /// If you have a type like `Vec` where `T: ToSql` Rust will not know how to use it as params. To get around - /// this the type must explicitly be converted to `&dyn ToSql`. 
- /// /// ```no_run /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { /// use tokio_postgres::types::ToSql; @@ -332,7 +329,7 @@ impl Client { /// ]; /// let mut it = client.query_raw( /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", - /// params.iter().map(|p| p as &dyn ToSql), + /// params, /// ).await?; /// /// pin_mut!(it); From 46b4b8008cfb8283b7cf73e698bbf7737b5aca1e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 27 Oct 2020 19:16:20 -0400 Subject: [PATCH 450/819] Update postgres-types/src/lib.rs --- postgres-types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 6542fc592..ecda2fbd2 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -960,7 +960,7 @@ mod sealed { /// A helper trait used internally by Rust-Postgres /// to be able create a parameters iterator from `&dyn ToSql` or `T: ToSql`. /// -/// /// This cannot be implemented outside of this crate. +/// This cannot be implemented outside of this crate. pub trait BorrowToSql: sealed::Sealed { /// Get a reference to a `ToSql` trait object fn borrow_to_sql(&self) -> &dyn ToSql; From bbf31696bb4ed1d7727d3de6bf747975a0dcae1d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 27 Oct 2020 19:45:55 -0400 Subject: [PATCH 451/819] Cleanups --- postgres-types/src/lib.rs | 17 ++++++++++------- tokio-postgres/src/query.rs | 8 ++++++-- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index ecda2fbd2..7e81998b3 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -108,7 +108,6 @@ #![doc(html_root_url = "https://docs.rs/postgres-types/0.1")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] -use crate::sealed::Sealed; use fallible_iterator::FallibleIterator; use postgres_protocol::types::{self, ArrayDimension}; use std::any::type_name; @@ -957,26 +956,30 @@ mod sealed { pub trait Sealed {} } -/// A helper trait used internally by Rust-Postgres -/// to be able create a parameters iterator from `&dyn ToSql` or `T: ToSql`. +/// A trait used by clients to abstract over `&dyn ToSql` and `T: ToSql`. /// /// This cannot be implemented outside of this crate. pub trait BorrowToSql: sealed::Sealed { - /// Get a reference to a `ToSql` trait object + /// Returns a reference to `self` as a `ToSql` trait object. 
fn borrow_to_sql(&self) -> &dyn ToSql; } -impl Sealed for &dyn ToSql {} +impl sealed::Sealed for &dyn ToSql {} impl BorrowToSql for &dyn ToSql { + #[inline] fn borrow_to_sql(&self) -> &dyn ToSql { *self } } -impl Sealed for T {} +impl sealed::Sealed for T where T: ToSql {} -impl BorrowToSql for T { +impl BorrowToSql for T +where + T: ToSql, +{ + #[inline] fn borrow_to_sql(&self) -> &dyn ToSql { self } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 2245b982a..f139ed915 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -14,8 +14,12 @@ use std::marker::PhantomPinned; use std::pin::Pin; use std::task::{Context, Poll}; -struct BorrowToSqlParamsDebug<'a, T: BorrowToSql>(&'a [T]); -impl<'a, T: BorrowToSql> std::fmt::Debug for BorrowToSqlParamsDebug<'a, T> { +struct BorrowToSqlParamsDebug<'a, T>(&'a [T]); + +impl<'a, T> fmt::Debug for BorrowToSqlParamsDebug<'a, T> +where + T: BorrowToSql, +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.0.iter().map(|x| x.borrow_to_sql())) From 5ad3c9a139303ba0c63b5c06337790a41d6474a2 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 5 Nov 2020 21:14:56 -0500 Subject: [PATCH 452/819] Add back keepalives config handling Also fix connection timeouts to be per-address --- tokio-postgres/Cargo.toml | 1 + tokio-postgres/src/connect_socket.rs | 55 ++++++++++++++++++++++++---- 2 files changed, 48 insertions(+), 8 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 883f8e1ee..14f8c7e9d 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -49,6 +49,7 @@ pin-project-lite = "0.1" phf = "0.8" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-types = { version = "0.1.2", path = "../postgres-types" } +socket2 = "0.3" tokio = { version = "0.3", features = ["io-util"] } tokio-util = { version = "0.4", features = ["codec"] } diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 145eb7dce..564677b05 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -1,28 +1,67 @@ use crate::config::Host; use crate::{Error, Socket}; +use socket2::{Domain, Protocol, Type}; use std::future::Future; use std::io; +use std::net::SocketAddr; +#[cfg(unix)] +use std::os::unix::io::{FromRawFd, IntoRawFd}; +#[cfg(windows)] +use std::os::windows::io::{FromRawSocket, IntoRawSocket}; use std::time::Duration; -use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; +use tokio::net::{self, TcpSocket}; use tokio::time; pub(crate) async fn connect_socket( host: &Host, port: u16, connect_timeout: Option, - _keepalives: bool, - _keepalives_idle: Duration, + keepalives: bool, + keepalives_idle: Duration, ) -> Result { match host { Host::Tcp(host) => { - let socket = - connect_with_timeout(TcpStream::connect((&**host, port)), connect_timeout).await?; - socket.set_nodelay(true).map_err(Error::connect)?; - // FIXME support keepalives? 
+ let addrs = net::lookup_host((&**host, port)) + .await + .map_err(Error::connect)?; + + let mut last_err = None; + + for addr in addrs { + let domain = match addr { + SocketAddr::V4(_) => Domain::ipv4(), + SocketAddr::V6(_) => Domain::ipv6(), + }; + + let socket = socket2::Socket::new(domain, Type::stream(), Some(Protocol::tcp())) + .map_err(Error::connect)?; + socket.set_nonblocking(true).map_err(Error::connect)?; + socket.set_nodelay(true).map_err(Error::connect)?; + if keepalives { + socket + .set_keepalive(Some(keepalives_idle)) + .map_err(Error::connect)?; + } + + #[cfg(unix)] + let socket = unsafe { TcpSocket::from_raw_fd(socket.into_raw_fd()) }; + #[cfg(windows)] + let socket = unsafe { TcpSocket::from_raw_socket(socket.into_raw_socket()) }; + + match connect_with_timeout(socket.connect(addr), connect_timeout).await { + Ok(socket) => return Ok(Socket::new_tcp(socket)), + Err(e) => last_err = Some(e), + } + } - Ok(Socket::new_tcp(socket)) + Err(last_err.unwrap_or_else(|| { + Error::connect(io::Error::new( + io::ErrorKind::InvalidInput, + "could not resolve any addresses", + )) + })) } #[cfg(unix)] Host::Unix(path) => { From c4b1f5ab5c1d0430c6489f2da39aa7a058373bdd Mon Sep 17 00:00:00 2001 From: Nicolas Guiard Date: Wed, 11 Nov 2020 11:14:28 +0100 Subject: [PATCH 453/819] Fix unix socket location in docs (/var/run/postgresql instead of /var/lib/postgresql) --- postgres/src/config.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 249e6b44e..c8dffa330 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -59,7 +59,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// host=/var/lib/postgresql,localhost port=1234 user=postgres password='password with spaces' +/// host=/var/run/postgresql,localhost port=1234 user=postgres password='password with spaces' /// ``` /// /// ```not_rust @@ -80,7 +80,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10 +/// postgresql://user:password@%2Fvar%2Frun%2Fpostgresql/mydb?connect_timeout=10 /// ``` /// /// ```not_rust @@ -88,7 +88,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// postgresql:///mydb?user=user&host=/var/lib/postgresql +/// postgresql:///mydb?user=user&host=/var/run/postgresql /// ``` #[derive(Clone)] pub struct Config { From 235dfeb95ad81a62fa036a3e9cc64bfd0e1293d2 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Tue, 17 Nov 2020 13:29:23 +0000 Subject: [PATCH 454/819] Update pin-project-lite requirement from 0.1 to 0.2 Updates the requirements on [pin-project-lite](https://github.com/taiki-e/pin-project-lite) to permit the latest version. 
- [Release notes](https://github.com/taiki-e/pin-project-lite/releases) - [Changelog](https://github.com/taiki-e/pin-project-lite/blob/master/CHANGELOG.md) - [Commits](https://github.com/taiki-e/pin-project-lite/compare/v0.1.0...v0.2.0) Signed-off-by: dependabot-preview[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 14f8c7e9d..f659663e1 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -45,7 +45,7 @@ futures = "0.3" log = "0.4" parking_lot = "0.11" percent-encoding = "2.0" -pin-project-lite = "0.1" +pin-project-lite = "0.2" phf = "0.8" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-types = { version = "0.1.2", path = "../postgres-types" } From aadf9420ef64e0f3775a3385446f65bcd904516d Mon Sep 17 00:00:00 2001 From: Jeff Davis Date: Sat, 28 Nov 2020 19:38:05 -0800 Subject: [PATCH 455/819] Add escape_identifier() and escape_literal(). --- postgres-protocol/src/escape/mod.rs | 93 ++++++++++++++++++++++++++++ postgres-protocol/src/escape/test.rs | 17 +++++ postgres-protocol/src/lib.rs | 1 + 3 files changed, 111 insertions(+) create mode 100644 postgres-protocol/src/escape/mod.rs create mode 100644 postgres-protocol/src/escape/test.rs diff --git a/postgres-protocol/src/escape/mod.rs b/postgres-protocol/src/escape/mod.rs new file mode 100644 index 000000000..4b3b56cd5 --- /dev/null +++ b/postgres-protocol/src/escape/mod.rs @@ -0,0 +1,93 @@ +//! Provides functions for escaping literals and identifiers for use +//! in SQL queries. +//! +//! Prefer parameterized queries where possible; see +//! [`Client::query`](crate::Client::query). Do not escape parameters. + +#[cfg(test)] +mod test; + +/// Escape a literal and surround result with single quotes. Not +/// recommended in most cases. +/// +/// If input contains backslashes, result will be of the form ` +/// E'...'` so it is safe to use regardless of the setting of +/// standard_conforming_strings. +pub fn escape_literal(input: &str) -> String { + escape_internal(input, false) +} + +/// Escape an identifier and surround result with double quotes. +pub fn escape_identifier(input: &str) -> String { + escape_internal(input, true) +} + +// Translation of PostgreSQL libpq's PQescapeInternal(). Does not +// require a connection because input string is known to be valid +// UTF-8. +// +// Escape arbitrary strings. If as_ident is true, we escape the +// result as an identifier; if false, as a literal. The result is +// returned in a newly allocated buffer. If we fail due to an +// encoding violation or out of memory condition, we return NULL, +// storing an error message into conn. +fn escape_internal(input: &str, as_ident: bool) -> String { + let mut num_backslashes = 0; + let mut num_quotes = 0; + let quote_char = if as_ident { '"' } else { '\'' }; + + // Scan the string for characters that must be escaped. + for ch in input.chars() { + if ch == quote_char { + num_quotes += 1; + } else if ch == '\\' { + num_backslashes += 1; + } + } + + // Allocate output String. + let mut result_size = input.len() + num_quotes + 3; // two quotes, plus a NUL + if !as_ident && num_backslashes > 0 { + result_size += num_backslashes + 2; + } + + let mut output = String::with_capacity(result_size); + + // If we are escaping a literal that contains backslashes, we use + // the escape string syntax so that the result is correct under + // either value of standard_conforming_strings. 
We also emit a + // leading space in this case, to guard against the possibility + // that the result might be interpolated immediately following an + // identifier. + if !as_ident && num_backslashes > 0 { + output.push(' '); + output.push('E'); + } + + // Opening quote. + output.push(quote_char); + + // Use fast path if possible. + // + // We've already verified that the input string is well-formed in + // the current encoding. If it contains no quotes and, in the + // case of literal-escaping, no backslashes, then we can just copy + // it directly to the output buffer, adding the necessary quotes. + // + // If not, we must rescan the input and process each character + // individually. + if num_quotes == 0 && (num_backslashes == 0 || as_ident) { + output.push_str(input); + } else { + for ch in input.chars() { + if ch == quote_char || (!as_ident && ch == '\\') { + output.push(ch); + } + output.push(ch); + } + } + + output.push(quote_char); + + output +} diff --git a/postgres-protocol/src/escape/test.rs b/postgres-protocol/src/escape/test.rs new file mode 100644 index 000000000..4816a103b --- /dev/null +++ b/postgres-protocol/src/escape/test.rs @@ -0,0 +1,17 @@ +use crate::escape::{escape_identifier, escape_literal}; + +#[test] +fn test_escape_idenifier() { + assert_eq!(escape_identifier("foo"), String::from("\"foo\"")); + assert_eq!(escape_identifier("f\\oo"), String::from("\"f\\oo\"")); + assert_eq!(escape_identifier("f'oo"), String::from("\"f'oo\"")); + assert_eq!(escape_identifier("f\"oo"), String::from("\"f\"\"oo\"")); +} + +#[test] +fn test_escape_literal() { + assert_eq!(escape_literal("foo"), String::from("'foo'")); + assert_eq!(escape_literal("f\\oo"), String::from(" E'f\\\\oo'")); + assert_eq!(escape_literal("f'oo"), String::from("'f''oo'")); + assert_eq!(escape_literal("f\"oo"), String::from("'f\"oo'")); +} diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index 9ebbcba59..b4374afa1 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -17,6 +17,7 @@ use bytes::{BufMut, BytesMut}; use std::io; pub mod authentication; +pub mod escape; pub mod message; pub mod types; From 55dddc5c74e7e2f633fcad97201937c24dfdfff0 Mon Sep 17 00:00:00 2001 From: Jeff Davis Date: Mon, 30 Nov 2020 09:27:21 -0800 Subject: [PATCH 456/819] Add instructions for running tests. --- README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/README.md b/README.md index cbe7182c6..3af068174 100644 --- a/README.md +++ b/README.md @@ -32,3 +32,16 @@ TLS support for postgres and tokio-postgres via native-tls. [Documentation](https://docs.rs/postgres-openssl) TLS support for postgres and tokio-postgres via openssl. + +# Running test suite + +The test suite requires postgres to be running in the correct configuration. The easiest way to do this is with docker: + +1. Install `docker` and `docker-compose`. + 1. On ubuntu: `sudo apt install docker.io docker-compose`. +1. Make sure your user has permissions for docker. + 1. On ubuntu: ``sudo usermod -aG docker $USER`` +1. Change to top-level directory of `rust-postgres` repo. +1. Run `docker-compose up -d`. +1. Run `cargo test`. +1. Run `docker-compose stop`. 
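
Once the docker-compose stack from the steps above is running, the test suites in this repository reach the server as the `postgres` user on port 5433 (the same connection string the tests in these patches use). As a quick orientation, a minimal standalone check along those lines might look like the sketch below; it is illustrative only and not part of any patch:

```rust
use postgres::{Client, Error, NoTls};

fn main() -> Result<(), Error> {
    // Connect to the dockerized test server started by `docker-compose up -d`.
    let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls)?;

    // Run a trivial query to confirm the server is reachable and responding.
    let row = client.query_one("SELECT 1::INT4", &[])?;
    let one: i32 = row.get(0);
    assert_eq!(one, 1);

    Ok(())
}
```
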
From fa8fb83ec8815ff85994a5ed46dfe5574efffd37 Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Tue, 1 Dec 2020 18:20:37 +0300 Subject: [PATCH 457/819] Fix tokio features for runtime feature in the docs --- tokio-postgres/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index c69fff793..8a29dfd00 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -103,7 +103,7 @@ //! //! | Feature | Description | Extra dependencies | Default | //! | ------- | ----------- | ------------------ | ------- | -//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 0.2 with the features `dns`, `net` and `time` | yes | +//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 0.3 with the `io-util` feature | yes | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | @@ -112,7 +112,7 @@ //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | -#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.5")] +#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.6")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] pub use crate::cancel_token::CancelToken; From 5099799166ec44aff6f9a68ee2dc450f4c517df9 Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Tue, 1 Dec 2020 19:03:02 +0300 Subject: [PATCH 458/819] return used features --- postgres-native-tls/src/lib.rs | 2 +- tokio-postgres/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index 00413c27b..5bded10b4 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -1,4 +1,4 @@ -//! TLS support for `tokio-postgres` and `postgres` via `native-tls. +//! TLS support for `tokio-postgres` and `postgres` via `native-tls`. //! //! # Examples //! diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 8a29dfd00..90c2b0404 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -103,7 +103,7 @@ //! //! | Feature | Description | Extra dependencies | Default | //! | ------- | ----------- | ------------------ | ------- | -//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 0.3 with the `io-util` feature | yes | +//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 0.3 with the features `net` and `time` | yes | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! 
| `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | From 07d9fb2ed61f8e661ad06a3d587d369c18231503 Mon Sep 17 00:00:00 2001 From: zach-com <54674467+zach-com@users.noreply.github.com> Date: Fri, 11 Dec 2020 13:47:39 -0500 Subject: [PATCH 459/819] Support connection validation with timeout --- postgres/src/client.rs | 5 +++++ tokio-postgres/src/client.rs | 9 +++++++++ tokio-postgres/src/error/mod.rs | 6 ++++++ 3 files changed, 20 insertions(+) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index f5637cdbd..001141655 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -413,6 +413,11 @@ impl Client { self.connection.block_on(self.client.simple_query(query)) } + /// Validates connection, timing out after specified duration. + pub fn is_valid(&mut self, timeout: std::time::Duration) -> Result<(), Error> { + self.connection.block_on(self.client.is_valid(timeout)) + } + /// Executes a sequence of SQL statements using the simple query protocol. /// /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 359a7cd16..090b41480 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -450,6 +450,15 @@ impl Client { self.simple_query_raw(query).await?.try_collect().await } + /// Validates connection, timing out after specified duration. + pub async fn is_valid(&self, timeout: Duration) -> Result<(), Error> { + type SqmResult = Result, Error>; + type SqmTimeout = Result; + let sqm_future = self.simple_query_raw("").await?.try_collect(); + let sqm_timeout: SqmTimeout = tokio::time::timeout(timeout, sqm_future).await; + sqm_timeout.map_err(|_| Error::timeout())?.map(|_| ()) + } + pub(crate) async fn simple_query_raw(&self, query: &str) -> Result { simple_query::simple_query(self.inner(), query).await } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index f9335cfe7..529095607 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -354,6 +354,7 @@ enum Kind { RowCount, #[cfg(feature = "runtime")] Connect, + Timeout, } struct ErrorInner { @@ -392,6 +393,7 @@ impl fmt::Display for Error { Kind::RowCount => fmt.write_str("query returned an unexpected number of rows")?, #[cfg(feature = "runtime")] Kind::Connect => fmt.write_str("error connecting to server")?, + Kind::Timeout => fmt.write_str("timeout waiting for server")?, }; if let Some(ref cause) = self.0.cause { write!(fmt, ": {}", cause)?; @@ -491,4 +493,8 @@ impl Error { pub(crate) fn connect(e: io::Error) -> Error { Error::new(Kind::Connect, Some(Box::new(e))) } + + pub(crate) fn timeout() -> Error { + Error::new(Kind::Timeout, None) + } } From c573edbdfb6c83305e59f54dbf045887f85b6577 Mon Sep 17 00:00:00 2001 From: Jeff Davis Date: Tue, 15 Dec 2020 16:50:26 -0800 Subject: [PATCH 460/819] Documentation fixup. --- postgres-protocol/src/escape/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres-protocol/src/escape/mod.rs b/postgres-protocol/src/escape/mod.rs index 4b3b56cd5..0ba7efdca 100644 --- a/postgres-protocol/src/escape/mod.rs +++ b/postgres-protocol/src/escape/mod.rs @@ -1,8 +1,8 @@ //! Provides functions for escaping literals and identifiers for use //! in SQL queries. //! -//! Prefer parameterized queries where possible; see -//! [`Client::query`](crate::Client::query). Do not escape parameters. +//! 
Prefer parameterized queries where possible. Do not escape +//! parameters in a parameterized query. #[cfg(test)] mod test; From f655c3b74c59144aed35859fecf830e5ce5960db Mon Sep 17 00:00:00 2001 From: zach-com <54674467+zach-com@users.noreply.github.com> Date: Wed, 16 Dec 2020 15:26:06 -0500 Subject: [PATCH 461/819] Address pull request comments --- postgres/src/client.rs | 11 +++++++++-- tokio-postgres/src/bind.rs | 3 ++- tokio-postgres/src/client.rs | 9 --------- tokio-postgres/src/error/mod.rs | 5 ++++- tokio-postgres/src/query.rs | 3 ++- 5 files changed, 17 insertions(+), 14 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 001141655..fcd7f772a 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -4,6 +4,7 @@ use crate::{ ToStatement, Transaction, TransactionBuilder, }; use std::task::Poll; +use std::time::Duration; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::types::{BorrowToSql, ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage, Socket}; @@ -414,8 +415,14 @@ impl Client { } /// Validates connection, timing out after specified duration. - pub fn is_valid(&mut self, timeout: std::time::Duration) -> Result<(), Error> { - self.connection.block_on(self.client.is_valid(timeout)) + pub fn is_valid(&mut self, timeout: Duration) -> Result<(), Error> { + let is_valid = Client::is_valid_inner(&self.client, timeout); + self.connection.block_on(is_valid) + } + + async fn is_valid_inner(client: &tokio_postgres::Client, timeout: Duration) -> Result<(), Error> { + let trivial_query = client.simple_query(""); + tokio::time::timeout(timeout, trivial_query).await?.map(|_| ()) } /// Executes a sequence of SQL statements using the simple query protocol. diff --git a/tokio-postgres/src/bind.rs b/tokio-postgres/src/bind.rs index 9c5c49218..75bd938d9 100644 --- a/tokio-postgres/src/bind.rs +++ b/tokio-postgres/src/bind.rs @@ -20,8 +20,9 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { + type BytesResult = Result; let name = format!("p{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); - let buf = client.with_buf(|buf| { + let buf = client.with_buf::<_, BytesResult>(|buf| { query::encode_bind(&statement, params, &name, buf)?; frontend::sync(buf); Ok(buf.split().freeze()) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 090b41480..359a7cd16 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -450,15 +450,6 @@ impl Client { self.simple_query_raw(query).await?.try_collect().await } - /// Validates connection, timing out after specified duration. 
- pub async fn is_valid(&self, timeout: Duration) -> Result<(), Error> { - type SqmResult = Result, Error>; - type SqmTimeout = Result; - let sqm_future = self.simple_query_raw("").await?.try_collect(); - let sqm_timeout: SqmTimeout = tokio::time::timeout(timeout, sqm_future).await; - sqm_timeout.map_err(|_| Error::timeout())?.map(|_| ()) - } - pub(crate) async fn simple_query_raw(&self, query: &str) -> Result { simple_query::simple_query(self.inner(), query).await } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 529095607..bb0dec9a5 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -5,6 +5,7 @@ use postgres_protocol::message::backend::{ErrorFields, ErrorResponseBody}; use std::error::{self, Error as _Error}; use std::fmt; use std::io; +use tokio::time::error::Elapsed; pub use self::sqlstate::*; @@ -493,8 +494,10 @@ impl Error { pub(crate) fn connect(e: io::Error) -> Error { Error::new(Kind::Connect, Some(Box::new(e))) } +} - pub(crate) fn timeout() -> Error { +impl From for Error { + fn from(_e: Elapsed) -> Error { Error::new(Kind::Timeout, None) } } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index f139ed915..d6179de4d 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -61,7 +61,8 @@ pub async fn query_portal( portal: &Portal, max_rows: i32, ) -> Result { - let buf = client.with_buf(|buf| { + type BytesResult = Result; + let buf = client.with_buf::<_, BytesResult>(|buf| { frontend::execute(portal.name(), max_rows, buf).map_err(Error::encode)?; frontend::sync(buf); Ok(buf.split().freeze()) From ee65811272ff221b6fcf605ef84507a02c2107c1 Mon Sep 17 00:00:00 2001 From: zach-com <54674467+zach-com@users.noreply.github.com> Date: Wed, 16 Dec 2020 15:56:16 -0500 Subject: [PATCH 462/819] Avoid unnecessary function call --- postgres/src/client.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index fcd7f772a..f34cffe1d 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -416,13 +416,11 @@ impl Client { /// Validates connection, timing out after specified duration. pub fn is_valid(&mut self, timeout: Duration) -> Result<(), Error> { - let is_valid = Client::is_valid_inner(&self.client, timeout); - self.connection.block_on(is_valid) - } - - async fn is_valid_inner(client: &tokio_postgres::Client, timeout: Duration) -> Result<(), Error> { - let trivial_query = client.simple_query(""); - tokio::time::timeout(timeout, trivial_query).await?.map(|_| ()) + let inner_client = &self.client; + self.connection.block_on(async { + let trivial_query = inner_client.simple_query(""); + tokio::time::timeout(timeout, trivial_query).await?.map(|_| ()) + }) } /// Executes a sequence of SQL statements using the simple query protocol. 
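
For context, the `is_valid` method added in the patches above gives callers — for example a connection pool doing health checks — a bounded way to verify that the backend still responds. A minimal usage sketch, assuming the synchronous `postgres` client as patched here (the pool-style helper name is hypothetical):

```rust
use postgres::{Client, NoTls};
use std::time::Duration;

// Treat the connection as healthy only if the backend answers the
// no-op simple query within one second.
fn connection_is_healthy(client: &mut Client) -> bool {
    client.is_valid(Duration::from_secs(1)).is_ok()
}

fn main() {
    let mut client =
        Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap();
    assert!(connection_is_healthy(&mut client));
}
```
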
From ef95f34c34a9a01b8bec6766cc56c34cc9ff6270 Mon Sep 17 00:00:00 2001 From: zach-com <54674467+zach-com@users.noreply.github.com> Date: Wed, 16 Dec 2020 22:17:21 -0500 Subject: [PATCH 463/819] `cargo fmt` --- postgres/src/client.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index f34cffe1d..adea5e984 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -419,7 +419,9 @@ impl Client { let inner_client = &self.client; self.connection.block_on(async { let trivial_query = inner_client.simple_query(""); - tokio::time::timeout(timeout, trivial_query).await?.map(|_| ()) + tokio::time::timeout(timeout, trivial_query) + .await? + .map(|_| ()) }) } From 9856c7b87a596023f659f8a281c7ef1caf2a3659 Mon Sep 17 00:00:00 2001 From: zach-com <54674467+zach-com@users.noreply.github.com> Date: Thu, 17 Dec 2020 17:55:22 -0500 Subject: [PATCH 464/819] Replace `impl From` block --- postgres/src/client.rs | 3 ++- tokio-postgres/src/bind.rs | 3 +-- tokio-postgres/src/error/mod.rs | 6 ++---- tokio-postgres/src/query.rs | 3 +-- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index adea5e984..050c5b229 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -420,7 +420,8 @@ impl Client { self.connection.block_on(async { let trivial_query = inner_client.simple_query(""); tokio::time::timeout(timeout, trivial_query) - .await? + .await + .map_err(|_| Error::timeout())? .map(|_| ()) }) } diff --git a/tokio-postgres/src/bind.rs b/tokio-postgres/src/bind.rs index 75bd938d9..9c5c49218 100644 --- a/tokio-postgres/src/bind.rs +++ b/tokio-postgres/src/bind.rs @@ -20,9 +20,8 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - type BytesResult = Result; let name = format!("p{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); - let buf = client.with_buf::<_, BytesResult>(|buf| { + let buf = client.with_buf(|buf| { query::encode_bind(&statement, params, &name, buf)?; frontend::sync(buf); Ok(buf.split().freeze()) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index bb0dec9a5..c5383df92 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -5,7 +5,6 @@ use postgres_protocol::message::backend::{ErrorFields, ErrorResponseBody}; use std::error::{self, Error as _Error}; use std::fmt; use std::io; -use tokio::time::error::Elapsed; pub use self::sqlstate::*; @@ -494,10 +493,9 @@ impl Error { pub(crate) fn connect(e: io::Error) -> Error { Error::new(Kind::Connect, Some(Box::new(e))) } -} -impl From for Error { - fn from(_e: Elapsed) -> Error { + #[doc(hidden)] + pub fn timeout() -> Error { Error::new(Kind::Timeout, None) } } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index d6179de4d..f139ed915 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -61,8 +61,7 @@ pub async fn query_portal( portal: &Portal, max_rows: i32, ) -> Result { - type BytesResult = Result; - let buf = client.with_buf::<_, BytesResult>(|buf| { + let buf = client.with_buf(|buf| { frontend::execute(portal.name(), max_rows, buf).map_err(Error::encode)?; frontend::sync(buf); Ok(buf.split().freeze()) From 2ab708c4bcd55bed8cee53b88436a5573d071abe Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 17 Dec 2020 20:36:15 -0500 Subject: [PATCH 465/819] minor cleanup --- postgres/src/client.rs | 6 ++++-- tokio-postgres/src/error/mod.rs | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff 
--git a/postgres/src/client.rs b/postgres/src/client.rs index 050c5b229..c052836f0 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -414,14 +414,16 @@ impl Client { self.connection.block_on(self.client.simple_query(query)) } - /// Validates connection, timing out after specified duration. + /// Validates the connection by performing a simple no-op query. + /// + /// If the specified timeout is reached before the backend responds, an error will be returned. pub fn is_valid(&mut self, timeout: Duration) -> Result<(), Error> { let inner_client = &self.client; self.connection.block_on(async { let trivial_query = inner_client.simple_query(""); tokio::time::timeout(timeout, trivial_query) .await - .map_err(|_| Error::timeout())? + .map_err(|_| Error::__private_api_timeout())? .map(|_| ()) }) } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index c5383df92..3df529049 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -495,7 +495,7 @@ impl Error { } #[doc(hidden)] - pub fn timeout() -> Error { + pub fn __private_api_timeout() -> Error { Error::new(Kind::Timeout, None) } } From 07aa69febf157492a6bc068ffcf33bc7904983fd Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 21 Dec 2020 10:05:20 -0500 Subject: [PATCH 466/819] update rand --- postgres-protocol/Cargo.toml | 2 +- postgres-protocol/src/authentication/sasl.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 5c7e11478..3ac6acfb3 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -16,6 +16,6 @@ fallible-iterator = "0.2" hmac = "0.10" md5 = "0.7" memchr = "2.0" -rand = "0.7" +rand = "0.8" sha2 = "0.9" stringprep = "0.1" diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index 416b4b998..d95471133 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -135,7 +135,7 @@ impl ScramSha256 { let mut rng = rand::thread_rng(); let nonce = (0..NONCE_LENGTH) .map(|_| { - let mut v = rng.gen_range(0x21u8, 0x7e); + let mut v = rng.gen_range(0x21u8..0x7e); if v == 0x2c { v = 0x7e } From c395b972a0d65b2587871f8eeafa64dab90aa7a1 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 24 Dec 2020 17:23:11 -0500 Subject: [PATCH 467/819] Create FUNDING.yml --- .github/FUNDING.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..b214efc24 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +github: [sfackler] From f1729e46367f257607d9b1a9b246acf367514a2c Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Wed, 23 Dec 2020 21:03:15 -0500 Subject: [PATCH 468/819] deps: upgrade to tokio v1.0 ecosystem --- postgres-native-tls/Cargo.toml | 6 ++-- postgres-openssl/Cargo.toml | 6 ++-- postgres-openssl/src/lib.rs | 50 +++++++++++++++++++++++++----- postgres-protocol/Cargo.toml | 2 +- postgres-types/Cargo.toml | 2 +- postgres-types/src/serde_json_1.rs | 1 - postgres/Cargo.toml | 4 +-- postgres/src/copy_out_reader.rs | 2 +- postgres/src/notifications.rs | 9 +++--- tokio-postgres/Cargo.toml | 8 ++--- tokio-postgres/src/binary_copy.rs | 2 +- tokio-postgres/src/copy_in.rs | 1 - 12 files changed, 63 insertions(+), 30 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 81dcf29e3..598531ad9 100644 --- 
a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -18,10 +18,10 @@ runtime = ["tokio-postgres/runtime"] [dependencies] futures = "0.3" native-tls = "0.2" -tokio = "0.3" -tokio-native-tls = "0.2" +tokio = "1.0" +tokio-native-tls = "0.3" tokio-postgres = { version = "0.6.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { version = "0.3", features = ["full"] } +tokio = { version = "1.0", features = ["full"] } postgres = { version = "0.18.0", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index c825e929d..11fd9f828 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -18,10 +18,10 @@ runtime = ["tokio-postgres/runtime"] [dependencies] futures = "0.3" openssl = "0.10" -tokio = "0.3" -tokio-openssl = "0.5" +tokio = "1.0" +tokio-openssl = "0.6" tokio-postgres = { version = "0.6.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { version = "0.3", features = ["full"] } +tokio = { version = "1.0", features = ["full"] } postgres = { version = "0.18.0", path = "../postgres" } diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 3780f2082..cad06d486 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -48,8 +48,10 @@ use openssl::hash::MessageDigest; use openssl::nid::Nid; #[cfg(feature = "runtime")] use openssl::ssl::SslConnector; -use openssl::ssl::{ConnectConfiguration, SslRef}; -use std::fmt::Debug; +use openssl::ssl::{self, ConnectConfiguration, SslRef}; +use openssl::x509::X509VerifyResult; +use std::error::Error; +use std::fmt::{self, Debug}; use std::future::Future; use std::io; use std::pin::Pin; @@ -57,7 +59,7 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tokio_openssl::{HandshakeError, SslStream}; +use tokio_openssl::SslStream; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; @@ -131,23 +133,55 @@ impl TlsConnector { impl TlsConnect for TlsConnector where - S: AsyncRead + AsyncWrite + Unpin + Debug + 'static + Sync + Send, + S: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Stream = TlsStream; - type Error = HandshakeError; + type Error = Box; #[allow(clippy::type_complexity)] - type Future = Pin, HandshakeError>> + Send>>; + type Future = Pin, Self::Error>> + Send>>; fn connect(self, stream: S) -> Self::Future { let future = async move { - let stream = tokio_openssl::connect(self.ssl, &self.domain, stream).await?; - Ok(TlsStream(stream)) + let ssl = self.ssl.into_ssl(&self.domain)?; + let mut stream = SslStream::new(ssl, stream)?; + match Pin::new(&mut stream).connect().await { + Ok(()) => Ok(TlsStream(stream)), + Err(error) => Err(Box::new(ConnectError { + error, + verify_result: stream.ssl().verify_result(), + }) as _), + } }; Box::pin(future) } } +#[derive(Debug)] +struct ConnectError { + error: ssl::Error, + verify_result: X509VerifyResult, +} + +impl fmt::Display for ConnectError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.error, fmt)?; + + if self.verify_result != X509VerifyResult::OK { + fmt.write_str(": ")?; + fmt::Display::fmt(&self.verify_result, fmt)?; + } + + Ok(()) + } +} + +impl Error for ConnectError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + Some(&self.error) + } +} + /// The stream returned by `TlsConnector`. 
pub struct TlsStream(SslStream); diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 3ac6acfb3..ed5bca864 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -11,7 +11,7 @@ readme = "../README.md" [dependencies] base64 = "0.13" byteorder = "1.0" -bytes = "0.5" +bytes = "1.0" fallible-iterator = "0.2" hmac = "0.10" md5 = "0.7" diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index c5e706f6e..00babcce2 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -22,7 +22,7 @@ with-uuid-0_8 = ["uuid-08"] with-time-0_2 = ["time-02"] [dependencies] -bytes = "0.5" +bytes = "1.0" fallible-iterator = "0.2" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } diff --git a/postgres-types/src/serde_json_1.rs b/postgres-types/src/serde_json_1.rs index e0fecb496..b98d561d1 100644 --- a/postgres-types/src/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -1,5 +1,4 @@ use crate::{FromSql, IsNull, ToSql, Type}; -use bytes::buf::BufMutExt; use bytes::{BufMut, BytesMut}; use serde_1::{Deserialize, Serialize}; use serde_json_1::Value; diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index b84becd19..52ba17a47 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -31,12 +31,12 @@ with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] with-time-0_2 = ["tokio-postgres/with-time-0_2"] [dependencies] -bytes = "0.5" +bytes = "1.0" fallible-iterator = "0.2" futures = "0.3" tokio-postgres = { version = "0.6.0", path = "../tokio-postgres" } -tokio = { version = "0.3", features = ["rt", "time"] } +tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" [dev-dependencies] diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index fd9c27fb0..e8b478d49 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -46,7 +46,7 @@ impl BufRead for CopyOutReader<'_> { }; } - Ok(self.cur.bytes()) + Ok(&self.cur) } fn consume(&mut self, amt: usize) { diff --git a/postgres/src/notifications.rs b/postgres/src/notifications.rs index 241c95a5d..ea44c31f8 100644 --- a/postgres/src/notifications.rs +++ b/postgres/src/notifications.rs @@ -4,6 +4,7 @@ use crate::connection::ConnectionRef; use crate::{Error, Notification}; use fallible_iterator::FallibleIterator; use futures::{ready, FutureExt}; +use std::pin::Pin; use std::task::Poll; use std::time::Duration; use tokio::time::{self, Instant, Sleep}; @@ -64,7 +65,7 @@ impl<'a> Notifications<'a> { /// This iterator may start returning `Some` after previously returning `None` if more notifications are received. pub fn timeout_iter(&mut self, timeout: Duration) -> TimeoutIter<'_> { TimeoutIter { - delay: self.connection.enter(|| time::sleep(timeout)), + delay: Box::pin(self.connection.enter(|| time::sleep(timeout))), timeout, connection: self.connection.as_ref(), } @@ -124,7 +125,7 @@ impl<'a> FallibleIterator for BlockingIter<'a> { /// A time-limited blocking iterator over pending notifications. 
pub struct TimeoutIter<'a> { connection: ConnectionRef<'a>, - delay: Sleep, + delay: Pin>, timeout: Duration, } @@ -134,7 +135,7 @@ impl<'a> FallibleIterator for TimeoutIter<'a> { fn next(&mut self) -> Result, Self::Error> { if let Some(notification) = self.connection.notifications_mut().pop_front() { - self.delay.reset(Instant::now() + self.timeout); + self.delay.as_mut().reset(Instant::now() + self.timeout); return Ok(Some(notification)); } @@ -143,7 +144,7 @@ impl<'a> FallibleIterator for TimeoutIter<'a> { self.connection.poll_block_on(|cx, notifications, done| { match notifications.pop_front() { Some(notification) => { - delay.reset(Instant::now() + timeout); + delay.as_mut().reset(Instant::now() + timeout); return Poll::Ready(Ok(Some(notification))); } None if done => return Poll::Ready(Ok(None)), diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f659663e1..1c82f7da8 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -38,7 +38,7 @@ with-time-0_2 = ["postgres-types/with-time-0_2"] [dependencies] async-trait = "0.1" -bytes = "0.5" +bytes = "1.0" byteorder = "1.0" fallible-iterator = "0.2" futures = "0.3" @@ -50,11 +50,11 @@ phf = "0.8" postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } postgres-types = { version = "0.1.2", path = "../postgres-types" } socket2 = "0.3" -tokio = { version = "0.3", features = ["io-util"] } -tokio-util = { version = "0.4", features = ["codec"] } +tokio = { version = "1.0", features = ["io-util"] } +tokio-util = { version = "0.6", features = ["codec"] } [dev-dependencies] -tokio = { version = "0.3", features = ["full"] } +tokio = { version = "1.0", features = ["full"] } env_logger = "0.8" criterion = "0.3" diff --git a/tokio-postgres/src/binary_copy.rs b/tokio-postgres/src/binary_copy.rs index 20064c728..3b1e13cd7 100644 --- a/tokio-postgres/src/binary_copy.rs +++ b/tokio-postgres/src/binary_copy.rs @@ -153,7 +153,7 @@ impl Stream for BinaryCopyOutStream { Some(header) => header.has_oids, None => { check_remaining(&chunk, HEADER_LEN)?; - if &chunk.bytes()[..MAGIC.len()] != MAGIC { + if !chunk.chunk().starts_with(MAGIC) { return Poll::Ready(Some(Err(Error::parse(io::Error::new( io::ErrorKind::InvalidData, "invalid magic value", diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index fc712f6db..bc90e5277 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -2,7 +2,6 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::{query, slice_iter, Error, Statement}; -use bytes::buf::BufExt; use bytes::{Buf, BufMut, BytesMut}; use futures::channel::mpsc; use futures::future; From 06dcebf248fe067d1ef29ef3427561a4248730f9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Dec 2020 08:51:17 -0500 Subject: [PATCH 469/819] Release postgres-protocol v0.6.0 --- postgres-protocol/CHANGELOG.md | 10 ++++++++++ postgres-protocol/Cargo.toml | 2 +- postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index b476fcabf..4040c3ba2 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,15 @@ # Change Log +## v0.6.0 - 2020-12-25 + +### Changed + +* Upgraded `bytes`, `hmac`, and `rand`. + +### Added + +* Added `escape::{escape_literal, escape_identifier}`. 
+ ## v0.5.3 - 2020-10-17 ### Changed diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index ed5bca864..6746cfebd 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.5.3" +version = "0.6.0" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 00babcce2..0707345fa 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -24,7 +24,7 @@ with-time-0_2 = ["time-02"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.0", path = "../postgres-protocol" } postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 1c82f7da8..24d962f65 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -47,7 +47,7 @@ parking_lot = "0.11" percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.8" -postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.0", path = "../postgres-protocol" } postgres-types = { version = "0.1.2", path = "../postgres-types" } socket2 = "0.3" tokio = { version = "1.0", features = ["io-util"] } From 71fc3e74bd9bad23c381a3df370c5f5bb23558fe Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Dec 2020 09:01:11 -0500 Subject: [PATCH 470/819] Release postgres-types v0.2.0 --- postgres-types/CHANGELOG.md | 10 ++++ postgres-types/Cargo.toml | 4 +- postgres-types/src/geo_types_04.rs | 78 ------------------------------ postgres-types/src/lib.rs | 2 - postgres/Cargo.toml | 1 - tokio-postgres/Cargo.toml | 4 +- 6 files changed, 12 insertions(+), 87 deletions(-) delete mode 100644 postgres-types/src/geo_types_04.rs diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 1c267923c..4fb55631b 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,15 @@ # Change Log +## v0.2.0 - 2020-12-25 + +### Changed + +* Upgraded `bytes` to 1.0. + +### Removed + +* Removed support for `geo-types` 0.4. 
+ ## v0.1.3 - 2020-10-17 ### Added diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 0707345fa..40edc621b 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.1.3" +version = "0.2.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -15,7 +15,6 @@ derive = ["postgres-derive"] with-bit-vec-0_6 = ["bit-vec-06"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] -with-geo-types-0_4 = ["geo-types-04"] with-geo-types-0_6 = ["geo-types-06"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_8 = ["uuid-08"] @@ -30,7 +29,6 @@ postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-deri bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = ["clock"], optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } -geo-types-04 = { version = "0.4", package = "geo-types", optional = true } geo-types-06 = { version = "0.6", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } diff --git a/postgres-types/src/geo_types_04.rs b/postgres-types/src/geo_types_04.rs deleted file mode 100644 index eb8b958eb..000000000 --- a/postgres-types/src/geo_types_04.rs +++ /dev/null @@ -1,78 +0,0 @@ -use bytes::BytesMut; -use fallible_iterator::FallibleIterator; -use geo_types_04::{Coordinate, LineString, Point, Rect}; -use postgres_protocol::types; -use std::error::Error; - -use crate::{FromSql, IsNull, ToSql, Type}; - -impl<'a> FromSql<'a> for Point { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { - let point = types::point_from_sql(raw)?; - Ok(Point::new(point.x(), point.y())) - } - - accepts!(POINT); -} - -impl ToSql for Point { - fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { - types::point_to_sql(self.x(), self.y(), out); - Ok(IsNull::No) - } - - accepts!(POINT); - to_sql_checked!(); -} - -impl<'a> FromSql<'a> for Rect { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { - let rect = types::box_from_sql(raw)?; - Ok(Rect { - min: Coordinate { - x: rect.lower_left().x(), - y: rect.lower_left().y(), - }, - max: Coordinate { - x: rect.upper_right().x(), - y: rect.upper_right().y(), - }, - }) - } - - accepts!(BOX); -} - -impl ToSql for Rect { - fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { - types::box_to_sql(self.min.x, self.min.y, self.max.x, self.max.y, out); - Ok(IsNull::No) - } - - accepts!(BOX); - to_sql_checked!(); -} - -impl<'a> FromSql<'a> for LineString { - fn from_sql(_: &Type, raw: &[u8]) -> Result> { - let path = types::path_from_sql(raw)?; - let points = path - .points() - .map(|p| Ok(Coordinate { x: p.x(), y: p.y() })) - .collect()?; - Ok(LineString(points)) - } - - accepts!(PATH); -} - -impl ToSql for LineString { - fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { - let closed = false; // always encode an open path from LineString - types::path_to_sql(closed, self.0.iter().map(|p| (p.x, p.y)), out)?; - Ok(IsNull::No) - } - - accepts!(PATH); - to_sql_checked!(); -} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 7e81998b3..2909b81ef 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -191,8 +191,6 @@ mod bit_vec_06; mod chrono_04; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; -#[cfg(feature = 
"with-geo-types-0_4")] -mod geo_types_04; #[cfg(feature = "with-geo-types-0_6")] mod geo_types_06; #[cfg(feature = "with-serde_json-1")] diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 52ba17a47..70d7ee30d 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -24,7 +24,6 @@ circle-ci = { repository = "sfackler/rust-postgres" } with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] -with-geo-types-0_4 = ["tokio-postgres/with-geo-types-0_4"] with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 24d962f65..908d1a381 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -30,7 +30,6 @@ runtime = ["tokio/net", "tokio/time"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] -with-geo-types-0_4 = ["postgres-types/with-geo-types-0_4"] with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] @@ -48,7 +47,7 @@ percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.8" postgres-protocol = { version = "0.6.0", path = "../postgres-protocol" } -postgres-types = { version = "0.1.2", path = "../postgres-types" } +postgres-types = { version = "0.2.0", path = "../postgres-types" } socket2 = "0.3" tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.6", features = ["codec"] } @@ -61,7 +60,6 @@ criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } eui48-04 = { version = "0.4", package = "eui48" } -geo-types-04 = { version = "0.4", package = "geo-types" } geo-types-06 = { version = "0.6", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } From 4fb6fd906f35a4736f50bec65d1e89da55f30a5f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Dec 2020 09:08:20 -0500 Subject: [PATCH 471/819] Release tokio-postgres v0.7.0 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/Cargo.toml | 2 +- tokio-postgres/CHANGELOG.md | 13 ++++++++++++- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/lib.rs | 2 +- 6 files changed, 17 insertions(+), 6 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 598531ad9..c55963c53 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -20,7 +20,7 @@ futures = "0.3" native-tls = "0.2" tokio = "1.0" tokio-native-tls = "0.3" -tokio-postgres = { version = "0.6.0", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.7.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = { version = "1.0", features = ["full"] } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 11fd9f828..ac68c549c 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -20,7 +20,7 @@ futures = "0.3" openssl = "0.10" tokio = "1.0" tokio-openssl = "0.6" -tokio-postgres = { version = "0.6.0", path = "../tokio-postgres", default-features = false } +tokio-postgres = { 
version = "0.7.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] tokio = { version = "1.0", features = ["full"] } diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 70d7ee30d..ebbb919f2 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -33,7 +33,7 @@ with-time-0_2 = ["tokio-postgres/with-time-0_2"] bytes = "1.0" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.6.0", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.0", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 95fd3ed6a..7d513daa2 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,10 +1,21 @@ # Change Log +## v0.7.0 - 2020-12-25 + +### Changed + +* Upgraded to `tokio` 1.0. +* Upgraded to `postgres-types` 0.2. + +### Added + +* Methods taking iterators of `ToSql` values can now take both `&dyn ToSql` and `T: ToSql` values. + ## v0.6.0 - 2020-10-17 ### Changed -* Upgraded to tokio `0.3`. +* Upgraded to `tokio` 0.3. * Added the detail and hint fields to `DbError`'s `Display` implementation. ## v0.5.5 - 2020-07-03 diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 908d1a381..613af127f 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.6.0" +version = "0.7.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 90c2b0404..6e73b883d 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -112,7 +112,7 @@ //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. 
| [time](https://crates.io/crates/time) 0.2 | no | -#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.6")] +#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.7")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] pub use crate::cancel_token::CancelToken; From de831c3a5384546b0fd6f667dbe7f767fcb6e7a0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Dec 2020 09:14:06 -0500 Subject: [PATCH 472/819] Release postgres v0.19.0 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/CHANGELOG.md | 16 ++++++++++++++-- postgres/Cargo.toml | 2 +- postgres/src/lib.rs | 1 - 5 files changed, 17 insertions(+), 6 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index c55963c53..471101af4 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "0.7.0", path = "../tokio-postgres", default-featur [dev-dependencies] tokio = { version = "1.0", features = ["full"] } -postgres = { version = "0.18.0", path = "../postgres" } +postgres = { version = "0.19.0", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index ac68c549c..cf16055c7 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "0.7.0", path = "../tokio-postgres", default-featur [dev-dependencies] tokio = { version = "1.0", features = ["full"] } -postgres = { version = "0.18.0", path = "../postgres" } +postgres = { version = "0.19.0", path = "../postgres" } diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 2c9443986..2376068a8 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,6 +1,18 @@ # Change Log -## v0.18.1 - 20201-10-19 +## v0.19.0 - 2020-12-25 + +### Changed + +* Upgraded to `tokio-postgres` 0.7. +* Methods taking iterators of `ToSql` values can now take both `&dyn ToSql` and `T: ToSql` values. + +### Added + +* Added `Client::is_valid` which can be used to check that the connection is still alive with a + timeout. + +## v0.18.1 - 2020-10-19 ### Fixed @@ -69,7 +81,7 @@ * `Client::query_raw` now returns a named type. * `Client::copy_in` and `Client::copy_out` no longer take query parameters as PostgreSQL doesn't support them in COPY queries. - + ### Removed * Removed support for `uuid` 0.7. diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index ebbb919f2..ed63d0c1a 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.18.1" +version = "0.19.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index dc0fd4440..f25fe1175 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -61,7 +61,6 @@ //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. 
| [time](https://crates.io/crates/time) 0.2 | no | -#![doc(html_root_url = "https://docs.rs/postgres/0.17")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] pub use fallible_iterator; From eeee279277db483148071e6844e2b5da2adcb3cd Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Dec 2020 09:18:12 -0500 Subject: [PATCH 473/819] Relese postgres-native-tls v0.5.0 --- postgres-native-tls/CHANGELOG.md | 6 ++++++ postgres-native-tls/Cargo.toml | 2 +- postgres-native-tls/src/lib.rs | 1 - 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index a840592e3..9eb7ab800 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.0 - 2020-12-25 + +### Changed + +* Upgraded to `tokio-postgres` 0.7. + ## v0.4.0 - 2020-10-17 ### Changed diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 471101af4..8180cd012 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-native-tls" -version = "0.4.0" +version = "0.5.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index 5bded10b4..70e34812d 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -45,7 +45,6 @@ //! # Ok(()) //! # } //! ``` -#![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] use std::future::Future; From 6e9ee0fa8424f4ebca527c4a5d51575ee9d4e1c4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Dec 2020 09:20:58 -0500 Subject: [PATCH 474/819] Release postgres-openssl v0.5.0 --- postgres-openssl/CHANGELOG.md | 6 ++++++ postgres-openssl/Cargo.toml | 2 +- postgres-openssl/src/lib.rs | 1 - 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md index f740828c5..346214ae8 100644 --- a/postgres-openssl/CHANGELOG.md +++ b/postgres-openssl/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.5.0 - 2020-12-25 + +### Changed + +* Upgraded to `tokio-postgres` 0.7. + ## v0.4.0 - 2020-10-17 ### Changed diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index cf16055c7..5738e74d2 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-openssl" -version = "0.4.0" +version = "0.5.0" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index cad06d486..dce3dff5d 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -39,7 +39,6 @@ //! # Ok(()) //! # } //! 
``` -#![doc(html_root_url = "https://docs.rs/postgres-openssl/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] #[cfg(feature = "runtime")] From 316c6dc3dca7cdcbe7cd1338f38b1116b35a4e77 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 26 Dec 2020 14:09:58 -0500 Subject: [PATCH 475/819] Update codegen sources to 13.1 --- codegen/src/errcodes.txt | 3 +- codegen/src/pg_range.dat | 2 +- codegen/src/pg_type.dat | 53 ++- postgres-types/src/type_gen.rs | 90 ++++- tokio-postgres/src/error/sqlstate.rs | 559 ++++++++++++++------------- 5 files changed, 410 insertions(+), 297 deletions(-) diff --git a/codegen/src/errcodes.txt b/codegen/src/errcodes.txt index 867e98b69..c79312ed0 100644 --- a/codegen/src/errcodes.txt +++ b/codegen/src/errcodes.txt @@ -2,7 +2,7 @@ # errcodes.txt # PostgreSQL error codes # -# Copyright (c) 2003-2019, PostgreSQL Global Development Group +# Copyright (c) 2003-2020, PostgreSQL Global Development Group # # This list serves as the basis for generating source files containing error # codes. It is kept in a common format to make sure all these source files have @@ -207,6 +207,7 @@ Section: Class 22 - Data Exception 2200S E ERRCODE_INVALID_XML_COMMENT invalid_xml_comment 2200T E ERRCODE_INVALID_XML_PROCESSING_INSTRUCTION invalid_xml_processing_instruction 22030 E ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE duplicate_json_object_key_value +22031 E ERRCODE_INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION invalid_argument_for_sql_json_datetime_function 22032 E ERRCODE_INVALID_JSON_TEXT invalid_json_text 22033 E ERRCODE_INVALID_SQL_JSON_SUBSCRIPT invalid_sql_json_subscript 22034 E ERRCODE_MORE_THAN_ONE_SQL_JSON_ITEM more_than_one_sql_json_item diff --git a/codegen/src/pg_range.dat b/codegen/src/pg_range.dat index dd9baa267..479754c24 100644 --- a/codegen/src/pg_range.dat +++ b/codegen/src/pg_range.dat @@ -3,7 +3,7 @@ # pg_range.dat # Initial contents of the pg_range system catalog. # -# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/include/catalog/pg_range.dat diff --git a/codegen/src/pg_type.dat b/codegen/src/pg_type.dat index be49e0011..e8be00083 100644 --- a/codegen/src/pg_type.dat +++ b/codegen/src/pg_type.dat @@ -3,7 +3,7 @@ # pg_type.dat # Initial contents of the pg_type system catalog. 
# -# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/include/catalog/pg_type.dat @@ -177,6 +177,10 @@ typtype => 'p', typcategory => 'P', typinput => 'pg_ddl_command_in', typoutput => 'pg_ddl_command_out', typreceive => 'pg_ddl_command_recv', typsend => 'pg_ddl_command_send', typalign => 'ALIGNOF_POINTER' }, +{ oid => '5069', array_type_oid => '271', descr => 'full transaction id', + typname => 'xid8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'U', typinput => 'xid8in', typoutput => 'xid8out', + typreceive => 'xid8recv', typsend => 'xid8send', typalign => 'd' }, # OIDS 600 - 699 @@ -215,9 +219,9 @@ { oid => '700', array_type_oid => '1021', descr => 'single-precision floating point number, 4-byte storage', - typname => 'float4', typlen => '4', typbyval => 'FLOAT4PASSBYVAL', - typcategory => 'N', typinput => 'float4in', typoutput => 'float4out', - typreceive => 'float4recv', typsend => 'float4send', typalign => 'i' }, + typname => 'float4', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'float4in', typoutput => 'float4out', typreceive => 'float4recv', + typsend => 'float4send', typalign => 'i' }, { oid => '701', array_type_oid => '1022', descr => 'double-precision floating point number, 8-byte storage', typname => 'float8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', @@ -379,6 +383,11 @@ typname => 'regclass', typlen => '4', typbyval => 't', typcategory => 'N', typinput => 'regclassin', typoutput => 'regclassout', typreceive => 'regclassrecv', typsend => 'regclasssend', typalign => 'i' }, +{ oid => '4191', array_type_oid => '4192', descr => 'registered collation', + typname => 'regcollation', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regcollationin', typoutput => 'regcollationout', + typreceive => 'regcollationrecv', typsend => 'regcollationsend', + typalign => 'i' }, { oid => '2206', array_type_oid => '2211', descr => 'registered type', typname => 'regtype', typlen => '4', typbyval => 't', typcategory => 'N', typinput => 'regtypein', typoutput => 'regtypeout', @@ -451,6 +460,11 @@ typcategory => 'U', typinput => 'txid_snapshot_in', typoutput => 'txid_snapshot_out', typreceive => 'txid_snapshot_recv', typsend => 'txid_snapshot_send', typalign => 'd', typstorage => 'x' }, +{ oid => '5038', array_type_oid => '5039', descr => 'snapshot', + typname => 'pg_snapshot', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'pg_snapshot_in', typoutput => 'pg_snapshot_out', + typreceive => 'pg_snapshot_recv', typsend => 'pg_snapshot_send', + typalign => 'd', typstorage => 'x' }, # range types { oid => '3904', array_type_oid => '3905', descr => 'range of integers', @@ -546,10 +560,6 @@ typtype => 'p', typcategory => 'P', typinput => 'internal_in', typoutput => 'internal_out', typreceive => '-', typsend => '-', typalign => 'ALIGNOF_POINTER' }, -{ oid => '2282', descr => 'obsolete, deprecated pseudo-type', - typname => 'opaque', typlen => '4', typbyval => 't', typtype => 'p', - typcategory => 'P', typinput => 'opaque_in', typoutput => 'opaque_out', - typreceive => '-', typsend => '-', typalign => 'i' }, { oid => '2283', descr => 'pseudo-type representing a polymorphic base type', typname => 'anyelement', typlen => '4', typbyval => 't', typtype => 'p', typcategory => 'P', typinput => 'anyelement_in', @@ -590,9 +600,34 @@ typoutput => 
'table_am_handler_out', typreceive => '-', typsend => '-', typalign => 'i' }, { oid => '3831', - descr => 'pseudo-type representing a polymorphic base type that is a range', + descr => 'pseudo-type representing a range over a polymorphic base type', typname => 'anyrange', typlen => '-1', typbyval => 'f', typtype => 'p', typcategory => 'P', typinput => 'anyrange_in', typoutput => 'anyrange_out', typreceive => '-', typsend => '-', typalign => 'd', typstorage => 'x' }, +{ oid => '5077', + descr => 'pseudo-type representing a polymorphic common type', + typname => 'anycompatible', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'anycompatible_in', + typoutput => 'anycompatible_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '5078', + descr => 'pseudo-type representing an array of polymorphic common type elements', + typname => 'anycompatiblearray', typlen => '-1', typbyval => 'f', + typtype => 'p', typcategory => 'P', typinput => 'anycompatiblearray_in', + typoutput => 'anycompatiblearray_out', + typreceive => 'anycompatiblearray_recv', typsend => 'anycompatiblearray_send', + typalign => 'd', typstorage => 'x' }, +{ oid => '5079', + descr => 'pseudo-type representing a polymorphic common type that is not an array', + typname => 'anycompatiblenonarray', typlen => '4', typbyval => 't', + typtype => 'p', typcategory => 'P', typinput => 'anycompatiblenonarray_in', + typoutput => 'anycompatiblenonarray_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '5080', + descr => 'pseudo-type representing a range over a polymorphic common type', + typname => 'anycompatiblerange', typlen => '-1', typbyval => 'f', + typtype => 'p', typcategory => 'P', typinput => 'anycompatiblerange_in', + typoutput => 'anycompatiblerange_out', typreceive => '-', typsend => '-', + typalign => 'd', typstorage => 'x' }, ] diff --git a/postgres-types/src/type_gen.rs b/postgres-types/src/type_gen.rs index 8156ccfa5..85978096a 100644 --- a/postgres-types/src/type_gen.rs +++ b/postgres-types/src/type_gen.rs @@ -35,6 +35,7 @@ pub enum Inner { PgNodeTree, JsonArray, TableAmHandler, + Xid8Array, IndexAmHandler, Point, Lseg, @@ -125,7 +126,6 @@ pub enum Inner { Trigger, LanguageHandler, Internal, - Opaque, Anyelement, RecordArray, Anynonarray, @@ -172,7 +172,16 @@ pub enum Inner { RegnamespaceArray, Regrole, RegroleArray, + Regcollation, + RegcollationArray, PgMcvList, + PgSnapshot, + PgSnapshotArray, + Xid8, + Anycompatible, + Anycompatiblearray, + Anycompatiblenonarray, + AnycompatibleRange, Other(Arc), } @@ -201,6 +210,7 @@ impl Inner { 194 => Some(Inner::PgNodeTree), 199 => Some(Inner::JsonArray), 269 => Some(Inner::TableAmHandler), + 271 => Some(Inner::Xid8Array), 325 => Some(Inner::IndexAmHandler), 600 => Some(Inner::Point), 601 => Some(Inner::Lseg), @@ -291,7 +301,6 @@ impl Inner { 2279 => Some(Inner::Trigger), 2280 => Some(Inner::LanguageHandler), 2281 => Some(Inner::Internal), - 2282 => Some(Inner::Opaque), 2283 => Some(Inner::Anyelement), 2287 => Some(Inner::RecordArray), 2776 => Some(Inner::Anynonarray), @@ -338,7 +347,16 @@ impl Inner { 4090 => Some(Inner::RegnamespaceArray), 4096 => Some(Inner::Regrole), 4097 => Some(Inner::RegroleArray), + 4191 => Some(Inner::Regcollation), + 4192 => Some(Inner::RegcollationArray), 5017 => Some(Inner::PgMcvList), + 5038 => Some(Inner::PgSnapshot), + 5039 => Some(Inner::PgSnapshotArray), + 5069 => Some(Inner::Xid8), + 5077 => Some(Inner::Anycompatible), + 5078 => Some(Inner::Anycompatiblearray), + 5079 => 
Some(Inner::Anycompatiblenonarray), + 5080 => Some(Inner::AnycompatibleRange), _ => None, } } @@ -367,6 +385,7 @@ impl Inner { Inner::PgNodeTree => 194, Inner::JsonArray => 199, Inner::TableAmHandler => 269, + Inner::Xid8Array => 271, Inner::IndexAmHandler => 325, Inner::Point => 600, Inner::Lseg => 601, @@ -457,7 +476,6 @@ impl Inner { Inner::Trigger => 2279, Inner::LanguageHandler => 2280, Inner::Internal => 2281, - Inner::Opaque => 2282, Inner::Anyelement => 2283, Inner::RecordArray => 2287, Inner::Anynonarray => 2776, @@ -504,7 +522,16 @@ impl Inner { Inner::RegnamespaceArray => 4090, Inner::Regrole => 4096, Inner::RegroleArray => 4097, + Inner::Regcollation => 4191, + Inner::RegcollationArray => 4192, Inner::PgMcvList => 5017, + Inner::PgSnapshot => 5038, + Inner::PgSnapshotArray => 5039, + Inner::Xid8 => 5069, + Inner::Anycompatible => 5077, + Inner::Anycompatiblearray => 5078, + Inner::Anycompatiblenonarray => 5079, + Inner::AnycompatibleRange => 5080, Inner::Other(ref u) => u.oid, } } @@ -533,6 +560,7 @@ impl Inner { Inner::PgNodeTree => &Kind::Simple, Inner::JsonArray => &Kind::Array(Type(Inner::Json)), Inner::TableAmHandler => &Kind::Pseudo, + Inner::Xid8Array => &Kind::Array(Type(Inner::Xid8)), Inner::IndexAmHandler => &Kind::Pseudo, Inner::Point => &Kind::Simple, Inner::Lseg => &Kind::Simple, @@ -623,7 +651,6 @@ impl Inner { Inner::Trigger => &Kind::Pseudo, Inner::LanguageHandler => &Kind::Pseudo, Inner::Internal => &Kind::Pseudo, - Inner::Opaque => &Kind::Pseudo, Inner::Anyelement => &Kind::Pseudo, Inner::RecordArray => &Kind::Pseudo, Inner::Anynonarray => &Kind::Pseudo, @@ -670,7 +697,16 @@ impl Inner { Inner::RegnamespaceArray => &Kind::Array(Type(Inner::Regnamespace)), Inner::Regrole => &Kind::Simple, Inner::RegroleArray => &Kind::Array(Type(Inner::Regrole)), + Inner::Regcollation => &Kind::Simple, + Inner::RegcollationArray => &Kind::Array(Type(Inner::Regcollation)), Inner::PgMcvList => &Kind::Simple, + Inner::PgSnapshot => &Kind::Simple, + Inner::PgSnapshotArray => &Kind::Array(Type(Inner::PgSnapshot)), + Inner::Xid8 => &Kind::Simple, + Inner::Anycompatible => &Kind::Pseudo, + Inner::Anycompatiblearray => &Kind::Pseudo, + Inner::Anycompatiblenonarray => &Kind::Pseudo, + Inner::AnycompatibleRange => &Kind::Pseudo, Inner::Other(ref u) => &u.kind, } } @@ -699,6 +735,7 @@ impl Inner { Inner::PgNodeTree => "pg_node_tree", Inner::JsonArray => "_json", Inner::TableAmHandler => "table_am_handler", + Inner::Xid8Array => "_xid8", Inner::IndexAmHandler => "index_am_handler", Inner::Point => "point", Inner::Lseg => "lseg", @@ -789,7 +826,6 @@ impl Inner { Inner::Trigger => "trigger", Inner::LanguageHandler => "language_handler", Inner::Internal => "internal", - Inner::Opaque => "opaque", Inner::Anyelement => "anyelement", Inner::RecordArray => "_record", Inner::Anynonarray => "anynonarray", @@ -836,7 +872,16 @@ impl Inner { Inner::RegnamespaceArray => "_regnamespace", Inner::Regrole => "regrole", Inner::RegroleArray => "_regrole", + Inner::Regcollation => "regcollation", + Inner::RegcollationArray => "_regcollation", Inner::PgMcvList => "pg_mcv_list", + Inner::PgSnapshot => "pg_snapshot", + Inner::PgSnapshotArray => "_pg_snapshot", + Inner::Xid8 => "xid8", + Inner::Anycompatible => "anycompatible", + Inner::Anycompatiblearray => "anycompatiblearray", + Inner::Anycompatiblenonarray => "anycompatiblenonarray", + Inner::AnycompatibleRange => "anycompatiblerange", Inner::Other(ref u) => &u.name, } } @@ -908,6 +953,9 @@ impl Type { /// TABLE_AM_HANDLER pub const TABLE_AM_HANDLER: Type = 
Type(Inner::TableAmHandler); + /// XID8[] + pub const XID8_ARRAY: Type = Type(Inner::Xid8Array); + /// INDEX_AM_HANDLER - pseudo-type for the result of an index AM handler function pub const INDEX_AM_HANDLER: Type = Type(Inner::IndexAmHandler); @@ -1178,9 +1226,6 @@ impl Type { /// INTERNAL - pseudo-type representing an internal data structure pub const INTERNAL: Type = Type(Inner::Internal); - /// OPAQUE - obsolete, deprecated pseudo-type - pub const OPAQUE: Type = Type(Inner::Opaque); - /// ANYELEMENT - pseudo-type representing a polymorphic base type pub const ANYELEMENT: Type = Type(Inner::Anyelement); @@ -1259,7 +1304,7 @@ impl Type { /// JSONB[] pub const JSONB_ARRAY: Type = Type(Inner::JsonbArray); - /// ANYRANGE - pseudo-type representing a polymorphic base type that is a range + /// ANYRANGE - pseudo-type representing a range over a polymorphic base type pub const ANY_RANGE: Type = Type(Inner::AnyRange); /// EVENT_TRIGGER - pseudo-type for the result of an event trigger function @@ -1319,6 +1364,33 @@ impl Type { /// REGROLE[] pub const REGROLE_ARRAY: Type = Type(Inner::RegroleArray); + /// REGCOLLATION - registered collation + pub const REGCOLLATION: Type = Type(Inner::Regcollation); + + /// REGCOLLATION[] + pub const REGCOLLATION_ARRAY: Type = Type(Inner::RegcollationArray); + /// PG_MCV_LIST - multivariate MCV list pub const PG_MCV_LIST: Type = Type(Inner::PgMcvList); + + /// PG_SNAPSHOT - snapshot + pub const PG_SNAPSHOT: Type = Type(Inner::PgSnapshot); + + /// PG_SNAPSHOT[] + pub const PG_SNAPSHOT_ARRAY: Type = Type(Inner::PgSnapshotArray); + + /// XID8 - full transaction id + pub const XID8: Type = Type(Inner::Xid8); + + /// ANYCOMPATIBLE - pseudo-type representing a polymorphic common type + pub const ANYCOMPATIBLE: Type = Type(Inner::Anycompatible); + + /// ANYCOMPATIBLEARRAY - pseudo-type representing an array of polymorphic common type elements + pub const ANYCOMPATIBLEARRAY: Type = Type(Inner::Anycompatiblearray); + + /// ANYCOMPATIBLENONARRAY - pseudo-type representing a polymorphic common type that is not an array + pub const ANYCOMPATIBLENONARRAY: Type = Type(Inner::Anycompatiblenonarray); + + /// ANYCOMPATIBLERANGE - pseudo-type representing a range over a polymorphic common type + pub const ANYCOMPATIBLE_RANGE: Type = Type(Inner::AnycompatibleRange); } diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 013a26472..3a6ea0bdc 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -281,6 +281,10 @@ impl SqlState { /// 22030 pub const DUPLICATE_JSON_OBJECT_KEY_VALUE: SqlState = SqlState(Cow::Borrowed("22030")); + /// 22031 + pub const INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION: SqlState = + SqlState(Cow::Borrowed("22031")); + /// 22032 pub const INVALID_JSON_TEXT: SqlState = SqlState(Cow::Borrowed("22032")); @@ -826,318 +830,319 @@ impl SqlState { #[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { - key: 3213172566270843353, + key: 732231254413039614, disps: ::phf::Slice::Static(&[ - (3, 46), (0, 6), - (0, 39), - (0, 0), - (0, 192), - (0, 49), - (0, 17), - (1, 138), - (0, 2), - (0, 117), - (0, 0), - (0, 33), - (16, 241), - (0, 20), - (2, 148), (0, 0), + (0, 218), + (0, 11), + (0, 31), + (0, 91), + (0, 55), + (0, 77), + (0, 72), (0, 1), - (1, 3), - (0, 27), - (0, 21), - (1, 75), - (13, 187), - (0, 3), - (0, 42), - (0, 12), - (0, 82), - (3, 253), - (0, 219), - (0, 6), - (4, 206), - (2, 16), - (5, 67), - (3, 15), - (0, 76), - (0, 57), - (5, 203), - 
(22, 134), - (1, 27), + (0, 73), + (1, 159), + (4, 4), + (0, 18), + (2, 100), + (0, 19), + (0, 16), + (0, 22), + (0, 51), (0, 0), - (1, 113), (0, 0), + (1, 2), + (2, 177), + (0, 10), + (1, 192), (0, 0), - (5, 11), - (0, 45), - (0, 62), - (0, 26), - (1, 158), - (21, 1), - (0, 4), - (5, 64), - (0, 77), - (1, 189), + (5, 245), + (0, 106), + (6, 243), + (47, 195), + (0, 146), + (4, 154), + (0, 2), + (4, 78), + (0, 196), + (0, 8), + (2, 146), + (0, 15), + (0, 170), + (0, 5), + (10, 18), + (0, 30), + (0, 33), + (0, 2), + (0, 0), + (47, 181), + (0, 144), + (39, 231), + (39, 173), + (0, 57), + (0, 7), + (1, 154), ]), entries: ::phf::Slice::Static(&[ - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("XX002", SqlState::INDEX_CORRUPTED), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("2202E", SqlState::ARRAY_ELEMENT_ERROR), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("58P01", SqlState::UNDEFINED_FILE), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("P0001", SqlState::RAISE_EXCEPTION), - ("08P01", SqlState::PROTOCOL_VIOLATION), + ("22P04", SqlState::BAD_COPY_FILE_FORMAT), + ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("53200", SqlState::OUT_OF_MEMORY), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), - ("42602", SqlState::INVALID_NAME), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("57P03", SqlState::CANNOT_CONNECT_NOW), - ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), - ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), - ("P0003", SqlState::TOO_MANY_ROWS), - ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("23505", SqlState::UNIQUE_VIOLATION), - ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("2200S", SqlState::INVALID_XML_COMMENT), - ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), - ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("57P02", SqlState::CRASH_SHUTDOWN), - ("22012", SqlState::DIVISION_BY_ZERO), - ("42723", SqlState::DUPLICATE_FUNCTION), - ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), - ("57000", SqlState::OPERATOR_INTERVENTION), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), - ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("23001", SqlState::RESTRICT_VIOLATION), - ("23514", 
SqlState::CHECK_VIOLATION), - ("42939", SqlState::RESERVED_NAME), - ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("53100", SqlState::DISK_FULL), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), - ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), - ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("P0002", SqlState::NO_DATA_FOUND), - ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), - ("58030", SqlState::IO_ERROR), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), ("01000", SqlState::WARNING), - ("22032", SqlState::INVALID_JSON_TEXT), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("02000", SqlState::NO_DATA), ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("42846", SqlState::CANNOT_COERCE), - ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("42702", SqlState::AMBIGUOUS_COLUMN), + ("HV000", SqlState::FDW_ERROR), + ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), + ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), + ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), + ("40000", SqlState::TRANSACTION_ROLLBACK), ("57P01", SqlState::ADMIN_SHUTDOWN), - ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("42P19", SqlState::INVALID_RECURSION), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("42804", SqlState::DATATYPE_MISMATCH), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("42710", SqlState::DUPLICATE_OBJECT), + ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), + ("54023", SqlState::TOO_MANY_ARGUMENTS), + ("22027", SqlState::TRIM_ERROR), ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), - ("P0004", SqlState::ASSERT_FAILURE), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), - ("XX000", SqlState::INTERNAL_ERROR), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), + ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("25P01", 
SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), - ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("42804", SqlState::DATATYPE_MISMATCH), + ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), + ("42703", SqlState::UNDEFINED_COLUMN), + ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), ("42P12", SqlState::INVALID_DATABASE_DEFINITION), - ("42704", SqlState::UNDEFINED_OBJECT), - ("42P04", SqlState::DUPLICATE_DATABASE), - ("HV000", SqlState::FDW_ERROR), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - ("42701", SqlState::DUPLICATE_COLUMN), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("XX001", SqlState::DATA_CORRUPTED), - ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), - ("58P02", SqlState::DUPLICATE_FILE), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("58030", SqlState::IO_ERROR), + ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), + ("22019", SqlState::INVALID_ESCAPE_CHARACTER), + ("42P20", SqlState::WINDOWING_ERROR), + ("3D000", SqlState::INVALID_CATALOG_NAME), ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("54011", SqlState::TOO_MANY_COLUMNS), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), - ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), - ("23P01", SqlState::EXCLUSION_VIOLATION), - ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("22035", SqlState::NO_SQL_JSON_ITEM), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - ("428C9", SqlState::GENERATED_ALWAYS), - ("53000", SqlState::INSUFFICIENT_RESOURCES), - ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("08006", SqlState::CONNECTION_FAILURE), - ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), - ("54001", SqlState::STATEMENT_TOO_COMPLEX), - ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), - ("23502", SqlState::NOT_NULL_VIOLATION), - ("22008", SqlState::DATETIME_FIELD_OVERFLOW), ("F0000", SqlState::CONFIG_FILE_ERROR), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), + ("25005", 
SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("42883", SqlState::UNDEFINED_FUNCTION), + ("42P06", SqlState::DUPLICATE_SCHEMA), + ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), + ("0F001", SqlState::L_E_INVALID_SPECIFICATION), ("57014", SqlState::QUERY_CANCELED), - ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), - ("02000", SqlState::NO_DATA), - ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("3D000", SqlState::INVALID_CATALOG_NAME), - ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("42883", SqlState::UNDEFINED_FUNCTION), + ("42939", SqlState::RESERVED_NAME), + ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("53200", SqlState::OUT_OF_MEMORY), + ("42809", SqlState::WRONG_OBJECT_TYPE), + ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), + ("42P16", SqlState::INVALID_TABLE_DEFINITION), + ("24000", SqlState::INVALID_CURSOR_STATE), + ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), + ("22007", SqlState::INVALID_DATETIME_FORMAT), + ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), + ("53100", SqlState::DISK_FULL), + ("P0003", SqlState::TOO_MANY_ROWS), ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), - ("22027", SqlState::TRIM_ERROR), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("0L000", SqlState::INVALID_GRANTOR), - ("42725", SqlState::AMBIGUOUS_FUNCTION), + ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("42830", SqlState::INVALID_FOREIGN_KEY), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("23502", SqlState::NOT_NULL_VIOLATION), + ("XX000", SqlState::INTERNAL_ERROR), + ("58P01", SqlState::UNDEFINED_FILE), ("42601", SqlState::SYNTAX_ERROR), - ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("22023", SqlState::INVALID_PARAMETER_VALUE), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), + ("42P09", SqlState::AMBIGUOUS_ALIAS), + ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), + ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), + ("42P03", SqlState::DUPLICATE_CURSOR), + ("XX002", SqlState::INDEX_CORRUPTED), ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), - ("42P16", SqlState::INVALID_TABLE_DEFINITION), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), - ("55006", SqlState::OBJECT_IN_USE), - ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("42803", SqlState::GROUPING_ERROR), - ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), + ("3B001", SqlState::S_E_INVALID_SPECIFICATION), + ("42P21", 
SqlState::COLLATION_MISMATCH), + ("42P07", SqlState::DUPLICATE_TABLE), + ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), + ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("XX001", SqlState::DATA_CORRUPTED), + ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), + ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("22032", SqlState::INVALID_JSON_TEXT), ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), + ("42725", SqlState::AMBIGUOUS_FUNCTION), + ("42602", SqlState::INVALID_NAME), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), + ("57P03", SqlState::CANNOT_CONNECT_NOW), + ("58P02", SqlState::DUPLICATE_FILE), ("42P22", SqlState::INDETERMINATE_COLLATION), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("2200S", SqlState::INVALID_XML_COMMENT), + ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), + ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), + ("428C9", SqlState::GENERATED_ALWAYS), + ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), + ("54001", SqlState::STATEMENT_TOO_COMPLEX), + ("42712", SqlState::DUPLICATE_ALIAS), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), + ("42710", SqlState::DUPLICATE_OBJECT), + ("54011", SqlState::TOO_MANY_COLUMNS), + ("42P19", SqlState::INVALID_RECURSION), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("57000", SqlState::OPERATOR_INTERVENTION), + ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), + ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), + ("P0002", SqlState::NO_DATA_FOUND), + ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("22012", SqlState::DIVISION_BY_ZERO), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("HV00B", SqlState::FDW_INVALID_HANDLE), + ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), + ("F0001", SqlState::LOCK_FILE_EXISTS), ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("42P07", SqlState::DUPLICATE_TABLE), - ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), + ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), + ("P0000", SqlState::PLPGSQL_ERROR), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), + ("2200D", SqlState::INVALID_ESCAPE_OCTET), + ("55P03", SqlState::LOCK_NOT_AVAILABLE), + ("23505", SqlState::UNIQUE_VIOLATION), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), + ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), + ("P0004", SqlState::ASSERT_FAILURE), + ("2200G", 
SqlState::MOST_SPECIFIC_TYPE_MISMATCH), + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("21000", SqlState::CARDINALITY_VIOLATION), + ("0A000", SqlState::FEATURE_NOT_SUPPORTED), + ("HV014", SqlState::FDW_TOO_MANY_HANDLES), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), + ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), + ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("HV00K", SqlState::FDW_REPLY_HANDLE), + ("23P01", SqlState::EXCLUSION_VIOLATION), + ("42P04", SqlState::DUPLICATE_DATABASE), + ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), + ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), + ("34000", SqlState::INVALID_CURSOR_NAME), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), + ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("00000", SqlState::SUCCESSFUL_COMPLETION), ("58000", SqlState::SYSTEM_ERROR), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), - ("42622", SqlState::NAME_TOO_LONG), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), + ("2200L", SqlState::NOT_AN_XML_DOCUMENT), + ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), + ("42846", SqlState::CANNOT_COERCE), + ("22035", SqlState::NO_SQL_JSON_ITEM), + ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), ("20000", SqlState::CASE_NOT_FOUND), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), - ("22022", SqlState::INDICATOR_OVERFLOW), - ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("22000", SqlState::DATA_EXCEPTION), + ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("55006", SqlState::OBJECT_IN_USE), + ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), - ("2200N", SqlState::INVALID_XML_CONTENT), + ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), + ("22008", SqlState::DATETIME_FIELD_OVERFLOW), + ("08006", SqlState::CONNECTION_FAILURE), ("42P01", SqlState::UNDEFINED_TABLE), - ("42P03", SqlState::DUPLICATE_CURSOR), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), + ("0L000", SqlState::INVALID_GRANTOR), + ("22005", SqlState::ERROR_IN_ASSIGNMENT), + ("42622", SqlState::NAME_TOO_LONG), + ("57P04", SqlState::DATABASE_DROPPED), + ("42803", SqlState::GROUPING_ERROR), + ("22P01", SqlState::FLOATING_POINT_EXCEPTION), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), + ("2200N", SqlState::INVALID_XML_CONTENT), + ("57P02", SqlState::CRASH_SHUTDOWN), + ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), + ("0F000", SqlState::LOCATOR_EXCEPTION), + ("08000", SqlState::CONNECTION_EXCEPTION), + ("2202E", 
SqlState::ARRAY_ELEMENT_ERROR), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("08P01", SqlState::PROTOCOL_VIOLATION), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("22031", SqlState::INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), + ("HV00P", SqlState::FDW_NO_SCHEMAS), + ("23514", SqlState::CHECK_VIOLATION), + ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("3F000", SqlState::INVALID_SCHEMA_NAME), ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), - ("22000", SqlState::DATA_EXCEPTION), - ("28P01", SqlState::INVALID_PASSWORD), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), - ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("42712", SqlState::DUPLICATE_ALIAS), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), + ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("42P21", SqlState::COLLATION_MISMATCH), - ("42703", SqlState::UNDEFINED_COLUMN), - ("57P04", SqlState::DATABASE_DROPPED), - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), + ("42723", SqlState::DUPLICATE_FUNCTION), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), + ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), + ("3B000", SqlState::SAVEPOINT_EXCEPTION), + ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("42P02", SqlState::UNDEFINED_PARAMETER), + ("23001", SqlState::RESTRICT_VIOLATION), ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("42P20", SqlState::WINDOWING_ERROR), - ("24000", SqlState::INVALID_CURSOR_STATE), + ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), + ("28P01", SqlState::INVALID_PASSWORD), + ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), + ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("P0001", SqlState::RAISE_EXCEPTION), + ("25000", SqlState::INVALID_TRANSACTION_STATE), + ("42704", SqlState::UNDEFINED_OBJECT), + ("22022", SqlState::INDICATOR_OVERFLOW), + ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), + ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), + ("42701", SqlState::DUPLICATE_COLUMN), + ("42P08", SqlState::AMBIGUOUS_PARAMETER), + ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), ("22011", SqlState::SUBSTRING_ERROR), - ("00000", SqlState::SUCCESSFUL_COMPLETION), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("P0000", SqlState::PLPGSQL_ERROR), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), ]), }; 
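The hunk above is the tail of the regenerated SQLSTATE lookup table, which maps each five-character code reported by the server to a `SqlState` constant. As a rough sketch of how those constants reach application code through the `tokio-postgres` error API (the `users` table and the duplicate-key handling below are assumptions made up for the example, not part of any patch in this series):

    use tokio_postgres::{error::SqlState, Client, Error};

    // Insert a row and branch on the SQLSTATE reported by the server.
    // "23505" in the generated table above maps to SqlState::UNIQUE_VIOLATION.
    async fn insert_user(client: &Client, id: i32) -> Result<bool, Error> {
        match client
            .execute("INSERT INTO users (id) VALUES ($1)", &[&id])
            .await
        {
            Ok(_) => Ok(true),
            Err(e) if e.code() == Some(&SqlState::UNIQUE_VIOLATION) => Ok(false),
            Err(e) => Err(e),
        }
    }

The same table also backs `DbError::code`, so the constants can be matched against either the top-level `Error` or the underlying `DbError`.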
From fbc42ecbe88eb1712a3c3650f295e9d84d09df5a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 26 Dec 2020 15:51:22 -0500 Subject: [PATCH 476/819] Stop setting timezone to UTC Closes #147 --- tokio-postgres/src/connect_raw.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index d07d5a2df..3c6658481 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -111,7 +111,7 @@ where S: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin, { - let mut params = vec![("client_encoding", "UTF8"), ("timezone", "UTC")]; + let mut params = vec![("client_encoding", "UTF8")]; if let Some(user) = &config.user { params.push(("user", &**user)); } From e2d327399b6ff423a1921fdb4f41b4a584386bac Mon Sep 17 00:00:00 2001 From: Bernardo Uriarte Blanco Date: Sat, 26 Dec 2020 23:17:04 +0100 Subject: [PATCH 477/819] add `client` method to `GenericClient` --- tokio-postgres/src/generic_client.rs | 11 +++++++++++ tokio-postgres/src/transaction.rs | 5 +++++ 2 files changed, 16 insertions(+) diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index df2c6b842..911a587b6 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -12,6 +12,9 @@ mod private { /// This trait is "sealed", and cannot be implemented outside of this crate. #[async_trait] pub trait GenericClient: private::Sealed { + /// Get a reference to the underlying `Client` + fn client(&self) -> &Client; + /// Like `Client::execute`. async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where @@ -74,6 +77,10 @@ impl private::Sealed for Client {} #[async_trait] impl GenericClient for Client { + fn client(&self) -> &Client { + self + } + async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -152,6 +159,10 @@ impl private::Sealed for Transaction<'_> {} #[async_trait] #[allow(clippy::needless_lifetimes)] impl GenericClient for Transaction<'_> { + fn client(&self) -> &Client { + self.client() + } + async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 45e9cc3aa..cf39d9186 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -64,6 +64,11 @@ impl<'a> Transaction<'a> { } } + /// Get a reference to the underlying `Client` + pub fn client(&self) -> &Client { + &self.client + } + /// Consumes the transaction, committing all changes made within it. pub async fn commit(mut self) -> Result<(), Error> { self.done = true; From e29439a5590157e3aedb373c33b96ccda6b3b959 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 30 Dec 2020 16:23:45 -0500 Subject: [PATCH 478/819] Delete FUNDING.yml --- .github/FUNDING.yml | 1 - 1 file changed, 1 deletion(-) delete mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml deleted file mode 100644 index b214efc24..000000000 --- a/.github/FUNDING.yml +++ /dev/null @@ -1 +0,0 @@ -github: [sfackler] From 7537e8a91819fa66d1590811e34a9c59b948b1af Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Fri, 8 Jan 2021 13:35:45 -0500 Subject: [PATCH 479/819] postgres-protocol: use RustCrypto md-5 crate Swap the md5 crate for the md-5 crate. 
Despite the latter's somewhat more suspicious name, it is part of the wider RustCrypto ecosystem, and shares code with the sha2 crate that postgres-protocol already uses. --- postgres-protocol/Cargo.toml | 2 +- postgres-protocol/src/authentication/mod.rs | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 6746cfebd..1224c633d 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -14,7 +14,7 @@ byteorder = "1.0" bytes = "1.0" fallible-iterator = "0.2" hmac = "0.10" -md5 = "0.7" +md-5 = "0.9" memchr = "2.0" rand = "0.8" sha2 = "0.9" diff --git a/postgres-protocol/src/authentication/mod.rs b/postgres-protocol/src/authentication/mod.rs index edacb46e7..9cfd6034c 100644 --- a/postgres-protocol/src/authentication/mod.rs +++ b/postgres-protocol/src/authentication/mod.rs @@ -1,5 +1,5 @@ //! Authentication protocol support. -use md5::Context; +use md5::{Digest, Md5}; pub mod sasl; @@ -10,14 +10,13 @@ pub mod sasl; /// `PasswordMessage` message. #[inline] pub fn md5_hash(username: &[u8], password: &[u8], salt: [u8; 4]) -> String { - let mut context = Context::new(); - context.consume(password); - context.consume(username); - let output = context.compute(); - context = Context::new(); - context.consume(format!("{:x}", output)); - context.consume(&salt); - format!("md5{:x}", context.compute()) + let mut md5 = Md5::new(); + md5.update(password); + md5.update(username); + let output = md5.finalize_reset(); + md5.update(format!("{:x}", output)); + md5.update(&salt); + format!("md5{:x}", md5.finalize()) } #[cfg(test)] From f3cbc8ce0431efb1196727a7e6016a5928dcbd55 Mon Sep 17 00:00:00 2001 From: Jeff Davis Date: Mon, 14 Dec 2020 11:56:21 -0800 Subject: [PATCH 480/819] PgLsn type. --- postgres-protocol/src/lib.rs | 3 + postgres-protocol/src/types/mod.rs | 18 +++++- postgres-types/src/lib.rs | 4 ++ postgres-types/src/pg_lsn.rs | 79 ++++++++++++++++++++++++++ tokio-postgres/tests/test/types/mod.rs | 15 ++++- 5 files changed, 117 insertions(+), 2 deletions(-) create mode 100644 postgres-types/src/pg_lsn.rs diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index b4374afa1..13de51e71 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -24,6 +24,9 @@ pub mod types; /// A Postgres OID. pub type Oid = u32; +/// A Postgres Log Sequence Number (LSN). +pub type Lsn = u64; + /// An enum indicating if a value is `NULL` or not. pub enum IsNull { /// The value is `NULL`. diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 621c01cc2..436132c2c 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -8,7 +8,7 @@ use std::io::Read; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str; -use crate::{write_nullable, FromUsize, IsNull, Oid}; +use crate::{write_nullable, FromUsize, IsNull, Lsn, Oid}; #[cfg(test)] mod test; @@ -142,6 +142,22 @@ pub fn int8_from_sql(mut buf: &[u8]) -> Result Result> { + let v = buf.read_u64::()?; + if !buf.is_empty() { + return Err("invalid buffer size".into()); + } + Ok(v) +} + /// Serializes a `FLOAT4` value. 
#[inline] pub fn float4_to_sql(v: f32, buf: &mut BytesMut) { diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 2909b81ef..e7cc781cd 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -130,6 +130,9 @@ use crate::type_gen::{Inner, Other}; #[doc(inline)] pub use postgres_protocol::Oid; +#[doc(inline)] +pub use pg_lsn::PgLsn; + pub use crate::special::{Date, Timestamp}; use bytes::BytesMut; @@ -204,6 +207,7 @@ mod uuid_08; #[cfg(feature = "with-time-0_2")] extern crate time_02 as time; +mod pg_lsn; #[doc(hidden)] pub mod private; mod special; diff --git a/postgres-types/src/pg_lsn.rs b/postgres-types/src/pg_lsn.rs new file mode 100644 index 000000000..f0bbf4022 --- /dev/null +++ b/postgres-types/src/pg_lsn.rs @@ -0,0 +1,79 @@ +//! Log Sequence Number (LSN) type for PostgreSQL Write-Ahead Log +//! (WAL), also known as the transaction log. + +use bytes::BytesMut; +use postgres_protocol::types; +use std::error::Error; +use std::fmt; +use std::str::FromStr; + +use crate::{FromSql, IsNull, ToSql, Type}; + +/// Postgres `PG_LSN` type. +#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)] +pub struct PgLsn(u64); + +/// Error parsing LSN. +#[derive(Debug)] +pub struct ParseLsnError(()); + +impl From for PgLsn { + fn from(lsn_u64: u64) -> Self { + PgLsn(lsn_u64) + } +} + +impl From for u64 { + fn from(lsn: PgLsn) -> u64 { + lsn.0 + } +} + +impl FromStr for PgLsn { + type Err = ParseLsnError; + + fn from_str(lsn_str: &str) -> Result { + let split: Vec<&str> = lsn_str.split('/').collect(); + if split.len() == 2 { + let (hi, lo) = ( + u64::from_str_radix(split[0], 16).map_err(|_| ParseLsnError(()))?, + u64::from_str_radix(split[1], 16).map_err(|_| ParseLsnError(()))?, + ); + Ok(PgLsn((hi << 32) | lo)) + } else { + Err(ParseLsnError(())) + } + } +} + +impl fmt::Display for PgLsn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:X}/{:X}", self.0 >> 32, self.0 & 0x00000000ffffffff) + } +} + +impl fmt::Debug for PgLsn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("{}", self)) + } +} + +impl<'a> FromSql<'a> for PgLsn { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { + let v = types::lsn_from_sql(raw)?; + Ok(v.into()) + } + + accepts!(PG_LSN); +} + +impl ToSql for PgLsn { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::lsn_to_sql((*self).into(), out); + Ok(IsNull::No) + } + + accepts!(PG_LSN); + + to_sql_checked!(); +} diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 168ca3a4d..c1480bf84 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -6,8 +6,9 @@ use std::f64; use std::fmt; use std::net::IpAddr; use std::result; +use std::str::FromStr; use std::time::{Duration, UNIX_EPOCH}; -use tokio_postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, ToSql, Type, WrongType}; +use tokio_postgres::types::{FromSql, FromSqlOwned, IsNull, Kind, PgLsn, ToSql, Type, WrongType}; use crate::connect; use bytes::BytesMut; @@ -135,6 +136,18 @@ async fn test_i64_params() { .await; } +#[tokio::test] +async fn test_lsn_params() { + test_type( + "PG_LSN", + &[ + (Some(PgLsn::from_str("2B/1757980").unwrap()), "'2B/1757980'"), + (None, "NULL"), + ], + ) + .await +} + #[tokio::test] async fn test_f32_params() { test_type( From e4e48a06b79efab2ecfba6d40b2dbc1d9eeffb36 Mon Sep 17 00:00:00 2001 From: Jeff Davis Date: Thu, 7 Jan 2021 18:25:53 -0800 Subject: [PATCH 481/819] Add password module for 
hashing on the client side. Hashing a password on the client side is useful so that you can set a user's password without ever sending it in plain text to the server. This avoids leaking passwords in the log or elsewhere. --- postgres-protocol/src/authentication/sasl.rs | 2 +- postgres-protocol/src/lib.rs | 1 + postgres-protocol/src/password/mod.rs | 104 +++++++++++++++++++ postgres-protocol/src/password/test.rs | 19 ++++ 4 files changed, 125 insertions(+), 1 deletion(-) create mode 100644 postgres-protocol/src/password/mod.rs create mode 100644 postgres-protocol/src/password/test.rs diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index d95471133..7c0d1754f 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -32,7 +32,7 @@ fn normalize(pass: &[u8]) -> Vec { } } -fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { +pub(crate) fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { let mut hmac = Hmac::::new_varkey(str).expect("HMAC is able to accept all key sizes"); hmac.update(salt); hmac.update(&[0, 0, 0, 1]); diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index 13de51e71..c9fe0ee19 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -19,6 +19,7 @@ use std::io; pub mod authentication; pub mod escape; pub mod message; +pub mod password; pub mod types; /// A Postgres OID. diff --git a/postgres-protocol/src/password/mod.rs b/postgres-protocol/src/password/mod.rs new file mode 100644 index 000000000..ccb95c79b --- /dev/null +++ b/postgres-protocol/src/password/mod.rs @@ -0,0 +1,104 @@ +//! Functions to encrypt a password in the client. +//! +//! This is intended to be used by client applications that wish to +//! send commands like `ALTER USER joe PASSWORD 'pwd'`. The password +//! need not be sent in cleartext if it is encrypted on the client +//! side. This is good because it ensures the cleartext password won't +//! end up in logs pg_stat displays, etc. + +use crate::authentication::sasl; +use hmac::{Hmac, Mac, NewMac}; +use md5::Md5; +use rand::RngCore; +use sha2::digest::FixedOutput; +use sha2::{Digest, Sha256}; + +#[cfg(test)] +mod test; + +const SCRAM_DEFAULT_ITERATIONS: u32 = 4096; +const SCRAM_DEFAULT_SALT_LEN: usize = 16; + +/// Hash password using SCRAM-SHA-256 with a randomly-generated +/// salt. +/// +/// The client may assume the returned string doesn't contain any +/// special characters that would require escaping in an SQL command. +pub fn scram_sha_256(password: &[u8]) -> String { + let mut salt: [u8; SCRAM_DEFAULT_SALT_LEN] = [0; SCRAM_DEFAULT_SALT_LEN]; + let mut rng = rand::thread_rng(); + rng.fill_bytes(&mut salt); + scram_sha_256_salt(password, salt) +} + +// Internal implementation of scram_sha_256 with a caller-provided +// salt. This is useful for testing. +pub(crate) fn scram_sha_256_salt(password: &[u8], salt: [u8; SCRAM_DEFAULT_SALT_LEN]) -> String { + // Prepare the password, per [RFC + // 4013](https://tools.ietf.org/html/rfc4013), if possible. + // + // Postgres treats passwords as byte strings (without embedded NUL + // bytes), but SASL expects passwords to be valid UTF-8. + // + // Follow the behavior of libpq's PQencryptPasswordConn(), and + // also the backend. If the password is not valid UTF-8, or if it + // contains prohibited characters (such as non-ASCII whitespace), + // just skip the SASLprep step and use the original byte + // sequence. 
+ let prepared: Vec = match std::str::from_utf8(password) { + Ok(password_str) => { + match stringprep::saslprep(password_str) { + Ok(p) => p.into_owned().into_bytes(), + // contains invalid characters; skip saslprep + Err(_) => Vec::from(password), + } + } + // not valid UTF-8; skip saslprep + Err(_) => Vec::from(password), + }; + + // salt password + let salted_password = sasl::hi(&prepared, &salt, SCRAM_DEFAULT_ITERATIONS); + + // client key + let mut hmac = + Hmac::::new_varkey(&salted_password).expect("HMAC is able to accept all key sizes"); + hmac.update(b"Client Key"); + let client_key = hmac.finalize().into_bytes(); + + // stored key + let mut hash = Sha256::default(); + hash.update(client_key.as_slice()); + let stored_key = hash.finalize_fixed(); + + // server key + let mut hmac = + Hmac::::new_varkey(&salted_password).expect("HMAC is able to accept all key sizes"); + hmac.update(b"Server Key"); + let server_key = hmac.finalize().into_bytes(); + + format!( + "SCRAM-SHA-256${}:{}${}:{}", + SCRAM_DEFAULT_ITERATIONS, + base64::encode(salt), + base64::encode(stored_key), + base64::encode(server_key) + ) +} + +/// **Not recommended, as MD5 is not considered to be secure.** +/// +/// Hash password using MD5 with the username as the salt. +/// +/// The client may assume the returned string doesn't contain any +/// special characters that would require escaping. +pub fn md5(password: &[u8], username: &str) -> String { + // salt password with username + let mut salted_password = Vec::from(password); + salted_password.extend_from_slice(username.as_bytes()); + + let mut hash = Md5::new(); + hash.update(&salted_password); + let digest = hash.finalize(); + format!("md5{:x}", digest) +} diff --git a/postgres-protocol/src/password/test.rs b/postgres-protocol/src/password/test.rs new file mode 100644 index 000000000..1432cb204 --- /dev/null +++ b/postgres-protocol/src/password/test.rs @@ -0,0 +1,19 @@ +use crate::password; + +#[test] +fn test_encrypt_scram_sha_256() { + // Specify the salt to make the test deterministic. Any bytes will do. + let salt: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + assert_eq!( + password::scram_sha_256_salt(b"secret", salt), + "SCRAM-SHA-256$4096:AQIDBAUGBwgJCgsMDQ4PEA==$8rrDg00OqaiWXJ7p+sCgHEIaBSHY89ZJl3mfIsf32oY=:05L1f+yZbiN8O0AnO40Og85NNRhvzTS57naKRWCcsIA=" + ); +} + +#[test] +fn test_encrypt_md5() { + assert_eq!( + password::md5(b"secret", "foo"), + "md54ab2c5d00339c4b2a4e921d2dc4edec7" + ); +} From 41375ebd3c676076920a2d644f292618d7ef8dcc Mon Sep 17 00:00:00 2001 From: Lachezar Lechev <8925621+elpiel@users.noreply.github.com> Date: Wed, 13 Jan 2021 11:53:29 +0200 Subject: [PATCH 482/819] Update tokio version in feature docs --- tokio-postgres/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 6e73b883d..e477687ac 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -103,7 +103,7 @@ //! //! | Feature | Description | Extra dependencies | Default | //! | ------- | ----------- | ------------------ | ------- | -//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 0.3 with the features `net` and `time` | yes | +//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 1.0 with the features `net` and `time` | yes | //! 
| `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | From cc6a0ada01bcda2ed16995939d9305ea6dc26a3a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 16 Jan 2021 14:19:14 -0500 Subject: [PATCH 483/819] Add back Error::as_db_error Closes #732 --- tokio-postgres/src/error/mod.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 3df529049..ee29a1dbb 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -414,14 +414,18 @@ impl Error { self.0.cause } + /// Returns the source of this error if it was a `DbError`. + /// + /// This is a simple convenience method. + pub fn as_db_error(&self) -> Option<&DbError> { + self.source().and_then(|e| e.downcast_ref::()) + } + /// Returns the SQLSTATE error code associated with the error. /// - /// This is a convenience method that downcasts the cause to a `DbError` - /// and returns its code. + /// This is a convenience method that downcasts the cause to a `DbError` and returns its code. pub fn code(&self) -> Option<&SqlState> { - self.source() - .and_then(|e| e.downcast_ref::()) - .map(DbError::code) + self.as_db_error().map(DbError::code) } fn new(kind: Kind, cause: Option>) -> Error { From 37fb39202a5892a39596b20fb4d0521ecf6aec70 Mon Sep 17 00:00:00 2001 From: Michael Kirk Date: Wed, 13 Jan 2021 21:34:45 -0600 Subject: [PATCH 484/819] Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature --- postgres-types/CHANGELOG.md | 6 ++ postgres-types/Cargo.toml | 4 +- .../src/{geo_types_06.rs => geo_types_07.rs} | 2 +- postgres-types/src/lib.rs | 4 +- postgres/CHANGELOG.md | 6 ++ postgres/Cargo.toml | 2 +- postgres/src/lib.rs | 3 +- tokio-postgres/CHANGELOG.md | 6 ++ tokio-postgres/Cargo.toml | 4 +- tokio-postgres/src/lib.rs | 3 +- .../tests/test/types/geo_types_04.rs | 60 ------------------- .../{geo_types_06.rs => geo_types_07.rs} | 3 +- tokio-postgres/tests/test/types/mod.rs | 6 +- 13 files changed, 32 insertions(+), 77 deletions(-) rename postgres-types/src/{geo_types_06.rs => geo_types_07.rs} (97%) delete mode 100644 tokio-postgres/tests/test/types/geo_types_04.rs rename tokio-postgres/tests/test/types/{geo_types_06.rs => geo_types_07.rs} (92%) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 4fb55631b..9f9da7989 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## Unreleased + +### Added + +* Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. 
+ ## v0.2.0 - 2020-12-25 ### Changed diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 40edc621b..ce5b0ab06 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -15,7 +15,7 @@ derive = ["postgres-derive"] with-bit-vec-0_6 = ["bit-vec-06"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] -with-geo-types-0_6 = ["geo-types-06"] +with-geo-types-0_7 = ["geo-types-0_7"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_8 = ["uuid-08"] with-time-0_2 = ["time-02"] @@ -29,7 +29,7 @@ postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-deri bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = ["clock"], optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } -geo-types-06 = { version = "0.6", package = "geo-types", optional = true } +geo-types-0_7 = { version = "0.7", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } diff --git a/postgres-types/src/geo_types_06.rs b/postgres-types/src/geo_types_07.rs similarity index 97% rename from postgres-types/src/geo_types_06.rs rename to postgres-types/src/geo_types_07.rs index 0f0b14fd9..7dfb51056 100644 --- a/postgres-types/src/geo_types_06.rs +++ b/postgres-types/src/geo_types_07.rs @@ -1,6 +1,6 @@ use bytes::BytesMut; use fallible_iterator::FallibleIterator; -use geo_types_06::{Coordinate, LineString, Point, Rect}; +use geo_types_0_7::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index e7cc781cd..d4595091a 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -194,8 +194,8 @@ mod bit_vec_06; mod chrono_04; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; -#[cfg(feature = "with-geo-types-0_6")] -mod geo_types_06; +#[cfg(feature = "with-geo-types-0_7")] +mod geo_types_07; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-time-0_2")] diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 2376068a8..66e66c6fb 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## Unreleased + +### Added + +* Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. + ## v0.19.0 - 2020-12-25 ### Changed diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index ed63d0c1a..7916a5e0d 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -24,7 +24,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] -with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] +with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] with-time-0_2 = ["tokio-postgres/with-time-0_2"] diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index f25fe1175..fe0c1f6c3 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -56,8 +56,7 @@ //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! 
| `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | -//! | `with-geo-types-0_4` | Enable support for the 0.4 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.4.0) 0.4 | no | -//! | `with-geo-types-0_5` | Enable support for the 0.5 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.5.0) 0.5 | no | +//! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 7d513daa2..db984ca18 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## Unreleased + +### Added + +* Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. + ## v0.7.0 - 2020-12-25 ### Changed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 613af127f..47e371675 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -30,7 +30,7 @@ runtime = ["tokio/net", "tokio/time"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] -with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] +with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] with-time-0_2 = ["postgres-types/with-time-0_2"] @@ -60,7 +60,7 @@ criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } eui48-04 = { version = "0.4", package = "eui48" } -geo-types-06 = { version = "0.6", package = "geo-types" } +geo-types-07 = { version = "0.7", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } uuid-08 = { version = "0.8", package = "uuid" } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index e477687ac..d62b8631d 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -107,8 +107,7 @@ //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | -//! | `with-geo-types-0_4` | Enable support for the 0.4 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.4.0) 0.4 | no | -//! | `with-geo-types-0_5` | Enable support for the 0.5 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.5.0) 0.5 | no | +//! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. 
| [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | diff --git a/tokio-postgres/tests/test/types/geo_types_04.rs b/tokio-postgres/tests/test/types/geo_types_04.rs deleted file mode 100644 index a4f0ac6f6..000000000 --- a/tokio-postgres/tests/test/types/geo_types_04.rs +++ /dev/null @@ -1,60 +0,0 @@ -use geo_types_04::{Coordinate, LineString, Point, Rect}; - -use crate::types::test_type; - -#[tokio::test] -async fn test_point_params() { - test_type( - "POINT", - &[ - (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), - (Some(Point::new(-3.2, 1.618)), "POINT(-3.2, 1.618)"), - (None, "NULL"), - ], - ) - .await; -} - -#[tokio::test] -async fn test_box_params() { - test_type( - "BOX", - &[ - ( - Some(Rect { - min: Coordinate { x: -3.2, y: 1.618 }, - max: Coordinate { - x: 160.0, - y: 69701.5615, - }, - }), - "BOX(POINT(160.0, 69701.5615), POINT(-3.2, 1.618))", - ), - (None, "NULL"), - ], - ) - .await; -} - -#[tokio::test] -async fn test_path_params() { - let points = vec![ - Coordinate { x: 0., y: 0. }, - Coordinate { x: -3.2, y: 1.618 }, - Coordinate { - x: 160.0, - y: 69701.5615, - }, - ]; - test_type( - "PATH", - &[ - ( - Some(LineString(points)), - "path '((0, 0), (-3.2, 1.618), (160.0, 69701.5615))'", - ), - (None, "NULL"), - ], - ) - .await; -} diff --git a/tokio-postgres/tests/test/types/geo_types_06.rs b/tokio-postgres/tests/test/types/geo_types_07.rs similarity index 92% rename from tokio-postgres/tests/test/types/geo_types_06.rs rename to tokio-postgres/tests/test/types/geo_types_07.rs index 7195abc06..85ff2553a 100644 --- a/tokio-postgres/tests/test/types/geo_types_06.rs +++ b/tokio-postgres/tests/test/types/geo_types_07.rs @@ -1,4 +1,5 @@ -use geo_types_06::{Coordinate, LineString, Point, Rect}; +#[cfg(feature = "with-geo-types-0_7")] +use geo_types_07::{Coordinate, LineString, Point, Rect}; use crate::types::test_type; diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index c1480bf84..abf058297 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -19,10 +19,8 @@ mod bit_vec_06; mod chrono_04; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; -#[cfg(feature = "with-geo-types-0_4")] -mod geo_types_04; -#[cfg(feature = "with-geo-types-0_6")] -mod geo_types_06; +#[cfg(feature = "with-geo-types-0_7")] +mod geo_types_07; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-time-0_2")] From 8b8491f31dc55e5cdc286c616a9900af622f9fa7 Mon Sep 17 00:00:00 2001 From: Michael Kirk Date: Mon, 25 Jan 2021 14:49:06 -0600 Subject: [PATCH 485/819] retain support for geo-types-0.6 --- postgres-types/Cargo.toml | 2 + postgres-types/src/geo_types_06.rs | 72 +++++++++++++++++++ postgres-types/src/lib.rs | 2 + postgres/Cargo.toml | 1 + postgres/src/lib.rs | 1 + tokio-postgres/Cargo.toml | 2 + tokio-postgres/src/lib.rs | 1 + .../tests/test/types/geo_types_06.rs | 60 ++++++++++++++++ tokio-postgres/tests/test/types/mod.rs | 2 + 9 files changed, 143 insertions(+) create mode 100644 postgres-types/src/geo_types_06.rs create mode 100644 tokio-postgres/tests/test/types/geo_types_06.rs diff --git a/postgres-types/Cargo.toml 
b/postgres-types/Cargo.toml index ce5b0ab06..7d48596ae 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -15,6 +15,7 @@ derive = ["postgres-derive"] with-bit-vec-0_6 = ["bit-vec-06"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] +with-geo-types-0_6 = ["geo-types-06"] with-geo-types-0_7 = ["geo-types-0_7"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_8 = ["uuid-08"] @@ -29,6 +30,7 @@ postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-deri bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = ["clock"], optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } +geo-types-06 = { version = "0.6", package = "geo-types", optional = true } geo-types-0_7 = { version = "0.7", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } diff --git a/postgres-types/src/geo_types_06.rs b/postgres-types/src/geo_types_06.rs new file mode 100644 index 000000000..0f0b14fd9 --- /dev/null +++ b/postgres-types/src/geo_types_06.rs @@ -0,0 +1,72 @@ +use bytes::BytesMut; +use fallible_iterator::FallibleIterator; +use geo_types_06::{Coordinate, LineString, Point, Rect}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for Point { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let point = types::point_from_sql(raw)?; + Ok(Point::new(point.x(), point.y())) + } + + accepts!(POINT); +} + +impl ToSql for Point { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::point_to_sql(self.x(), self.y(), out); + Ok(IsNull::No) + } + + accepts!(POINT); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Rect { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let rect = types::box_from_sql(raw)?; + Ok(Rect::new( + (rect.lower_left().x(), rect.lower_left().y()), + (rect.upper_right().x(), rect.upper_right().y()), + )) + } + + accepts!(BOX); +} + +impl ToSql for Rect { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::box_to_sql(self.min().x, self.min().y, self.max().x, self.max().y, out); + Ok(IsNull::No) + } + + accepts!(BOX); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for LineString { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let path = types::path_from_sql(raw)?; + let points = path + .points() + .map(|p| Ok(Coordinate { x: p.x(), y: p.y() })) + .collect()?; + Ok(LineString(points)) + } + + accepts!(PATH); +} + +impl ToSql for LineString { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + let closed = false; // always encode an open path from LineString + types::path_to_sql(closed, self.0.iter().map(|p| (p.x, p.y)), out)?; + Ok(IsNull::No) + } + + accepts!(PATH); + to_sql_checked!(); +} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index d4595091a..e9a7c10a2 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -194,6 +194,8 @@ mod bit_vec_06; mod chrono_04; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; +#[cfg(feature = "with-geo-types-0_6")] +mod geo_types_06; #[cfg(feature = "with-geo-types-0_7")] mod geo_types_07; #[cfg(feature = "with-serde_json-1")] diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 7916a5e0d..0128f8a4e 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -24,6 
+24,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] +with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index fe0c1f6c3..4513aeef7 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -56,6 +56,7 @@ //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 47e371675..254ebe62e 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -30,6 +30,7 @@ runtime = ["tokio/net", "tokio/time"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] +with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] @@ -60,6 +61,7 @@ criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } eui48-04 = { version = "0.4", package = "eui48" } +geo-types-06 = { version = "0.6", package = "geo-types" } geo-types-07 = { version = "0.7", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index d62b8631d..77713bb11 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -107,6 +107,7 @@ //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. 
| [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | diff --git a/tokio-postgres/tests/test/types/geo_types_06.rs b/tokio-postgres/tests/test/types/geo_types_06.rs new file mode 100644 index 000000000..7195abc06 --- /dev/null +++ b/tokio-postgres/tests/test/types/geo_types_06.rs @@ -0,0 +1,60 @@ +use geo_types_06::{Coordinate, LineString, Point, Rect}; + +use crate::types::test_type; + +#[tokio::test] +async fn test_point_params() { + test_type( + "POINT", + &[ + (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), + (Some(Point::new(-3.2, 1.618)), "POINT(-3.2, 1.618)"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_box_params() { + test_type( + "BOX", + &[ + ( + Some(Rect::new( + Coordinate { x: -3.2, y: 1.618 }, + Coordinate { + x: 160.0, + y: 69701.5615, + }, + )), + "BOX(POINT(160.0, 69701.5615), POINT(-3.2, 1.618))", + ), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_path_params() { + let points = vec![ + Coordinate { x: 0., y: 0. }, + Coordinate { x: -3.2, y: 1.618 }, + Coordinate { + x: 160.0, + y: 69701.5615, + }, + ]; + test_type( + "PATH", + &[ + ( + Some(LineString(points)), + "path '((0, 0), (-3.2, 1.618), (160.0, 69701.5615))'", + ), + (None, "NULL"), + ], + ) + .await; +} diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index abf058297..11d128764 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -19,6 +19,8 @@ mod bit_vec_06; mod chrono_04; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; +#[cfg(feature = "with-geo-types-0_6")] +mod geo_types_06; #[cfg(feature = "with-geo-types-0_7")] mod geo_types_07; #[cfg(feature = "with-serde_json-1")] From 77cfee0da1fff2d90c61e3033b962057ccd2ba3b Mon Sep 17 00:00:00 2001 From: George London Date: Fri, 5 Feb 2021 19:49:36 -0800 Subject: [PATCH 486/819] Fix minor docstring typo --- postgres/src/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index c052836f0..0ff2376e1 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -407,8 +407,8 @@ impl Client { /// /// # Warning /// - /// Prepared statements should be use for any query which contains user-specified data, as they provided the - /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass + /// Prepared statements should be used for any query which contains user-specified data, as they provided the + /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! 
pub fn simple_query(&mut self, query: &str) -> Result, Error> { self.connection.block_on(self.client.simple_query(query)) From 85a6c95a695d645933a7569206ba823dd78fc725 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 16 Mar 2021 20:24:16 -0400 Subject: [PATCH 487/819] switch CI to github actions --- .circleci/config.yml | 42 -------------------- .github/workflows/ci.yml | 83 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 42 deletions(-) delete mode 100644 .circleci/config.yml create mode 100644 .github/workflows/ci.yml diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 8038a2c0f..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,42 +0,0 @@ -restore_registry: &RESTORE_REGISTRY - restore_cache: - key: registry -save_registry: &SAVE_REGISTRY - save_cache: - key: registry-{{ .BuildNum }} - paths: - - /usr/local/cargo/registry/index -deps_key: &DEPS_KEY - key: deps-{{ checksum "~/rust-version" }}-{{ checksum "Cargo.lock" }} -restore_deps: &RESTORE_DEPS - restore_cache: - <<: *DEPS_KEY -save_deps: &SAVE_DEPS - save_cache: - <<: *DEPS_KEY - paths: - - target - - /usr/local/cargo/registry/cache - -version: 2 -jobs: - build: - docker: - - image: rust:1.45.0 - environment: - RUSTFLAGS: -D warnings - - image: sfackler/rust-postgres-test:6 - steps: - - checkout - - run: rustup component add rustfmt clippy - - *RESTORE_REGISTRY - - run: cargo generate-lockfile - - *SAVE_REGISTRY - - run: rustc --version > ~/rust-version - - *RESTORE_DEPS - - run: cargo fmt --all -- --check - - run: cargo clippy --all --all-targets --all-features - - run: cargo test --all - - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features - - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features - - *SAVE_DEPS diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..7191eb9e4 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,83 @@ +name: CI + +on: + pull_request: + branches: + - master + push: + branches: + - master + +env: + RUSTFLAGS: -Dwarnings + RUST_BACKTRACE: 1 + +jobs: + rustfmt: + name: rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: sfackler/actions/rustup@master + - uses: sfackler/actions/rustfmt@master + + clippy: + name: clippy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: sfackler/actions/rustup@master + - run: echo "::set-output name=version::$(rustc --version)" + id: rust-version + - uses: actions/cache@v1 + with: + path: ~/.cargo/registry/index + key: index-${{ runner.os }}-${{ github.run_number }} + restore-keys: | + index-${{ runner.os }}- + - run: cargo generate-lockfile + - uses: actions/cache@v1 + with: + path: ~/.cargo/registry/cache + key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} + - run: cargo fetch + - uses: actions/cache@v1 + with: + path: target + key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y + - run: cargo clippy --all --all-targets + + test: + name: test + runs-on: ubuntu-latest + services: + postgres: + image: sfackler/rust-postgres-test:6 + ports: + - 5433:5433 + steps: + - uses: actions/checkout@v2 + - uses: sfackler/actions/rustup@master + with: + version: 1.45.0 + - run: echo "::set-output name=version::$(rustc --version)" + id: rust-version + - uses: actions/cache@v1 + with: + path: ~/.cargo/registry/index + key: index-${{ runner.os }}-${{ 
github.run_number }} + restore-keys: | + index-${{ runner.os }}- + - run: cargo generate-lockfile + - uses: actions/cache@v1 + with: + path: ~/.cargo/registry/cache + key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} + - run: cargo fetch + - uses: actions/cache@v1 + with: + path: target + key: test-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y + - run: cargo test --all + - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features + - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features From ad2c8cf592899aed0e5d141af4411897d310e559 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 16 Mar 2021 20:32:26 -0400 Subject: [PATCH 488/819] clippy --- postgres-protocol/src/types/mod.rs | 16 +++++++--------- tokio-postgres/src/config.rs | 8 ++++---- tokio-postgres/src/connection.rs | 3 ++- tokio-postgres/tests/test/types/mod.rs | 6 +----- 4 files changed, 14 insertions(+), 19 deletions(-) diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 436132c2c..a595f5a30 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -231,9 +231,9 @@ fn write_pascal_string(s: &str, buf: &mut BytesMut) -> Result<(), StdBox( - mut buf: &'a [u8], -) -> Result, StdBox> { +pub fn hstore_from_sql( + mut buf: &[u8], +) -> Result, StdBox> { let count = buf.read_i32::()?; if count < 0 { return Err("invalid entry count".into()); @@ -319,9 +319,7 @@ where /// Deserializes a `VARBIT` or `BIT` value. #[inline] -pub fn varbit_from_sql<'a>( - mut buf: &'a [u8], -) -> Result, StdBox> { +pub fn varbit_from_sql(mut buf: &[u8]) -> Result, StdBox> { let len = buf.read_i32::()?; if len < 0 { return Err("invalid varbit length: varbit < 0".into()); @@ -508,7 +506,7 @@ where /// Deserializes an array value. #[inline] -pub fn array_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn array_from_sql(mut buf: &[u8]) -> Result, StdBox> { let dimensions = buf.read_i32::()?; if dimensions < 0 { return Err("invalid dimension count".into()); @@ -738,7 +736,7 @@ pub enum RangeBound { /// Deserializes a range value. #[inline] -pub fn range_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn range_from_sql(mut buf: &[u8]) -> Result, StdBox> { let tag = buf.read_u8()?; if tag == RANGE_EMPTY { @@ -911,7 +909,7 @@ where /// Deserializes a Postgres path. #[inline] -pub fn path_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn path_from_sql(mut buf: &[u8]) -> Result, StdBox> { let closed = buf.read_u8()? 
!= 0; let points = buf.read_i32::()?; diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index da171cc79..111487173 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -760,8 +760,8 @@ impl<'a> UrlParser<'a> { fn remove_url_prefix(s: &str) -> Option<&str> { for prefix in &["postgres://", "postgresql://"] { - if s.starts_with(prefix) { - return Some(&s[prefix.len()..]); + if let Some(stripped) = s.strip_prefix(prefix) { + return Some(stripped); } } @@ -825,8 +825,8 @@ impl<'a> UrlParser<'a> { let host = &chunk[1..idx]; let remaining = &chunk[idx + 1..]; - let port = if remaining.starts_with(':') { - Some(&remaining[1..]) + let port = if let Some(port) = remaining.strip_prefix(':') { + Some(port) } else if remaining.is_empty() { None } else { diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index 9c8e369f1..b6805f76c 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -200,9 +200,10 @@ where return Ok(false); } - if let Poll::Pending = Pin::new(&mut self.stream) + if Pin::new(&mut self.stream) .poll_ready(cx) .map_err(Error::io)? + .is_pending() { trace!("poll_write: waiting on socket"); return Ok(false); diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 11d128764..bc31ece71 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -483,11 +483,7 @@ async fn domain() { } fn accepts(ty: &Type) -> bool { - ty.name() == "session_id" - && match *ty.kind() { - Kind::Domain(_) => true, - _ => false, - } + ty.name() == "session_id" && matches!(ty.kind(), Kind::Domain(_)) } to_sql_checked!(); From ead071b87c5468a5208b4f0b262977e29a0d36fd Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 16 Mar 2021 20:48:02 -0400 Subject: [PATCH 489/819] Remove readme badge --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 3af068174..b81a6716f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ # Rust-Postgres -[![CircleCI](https://circleci.com/gh/sfackler/rust-postgres.svg?style=shield)](https://circleci.com/gh/sfackler/rust-postgres) PostgreSQL support for Rust. 
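For reference, a minimal standalone sketch (not taken from the patches themselves) of the two clippy idioms applied in patch 488 above: `str::strip_prefix` in place of `starts_with` plus manual slicing, and the `matches!` macro in place of a `match` that only yields a boolean. The function names and string values here are illustrative only.

fn remove_prefix(s: &str) -> Option<&str> {
    // `strip_prefix` returns the remainder only when the prefix matches,
    // replacing the `starts_with` + `&s[prefix.len()..]` pattern flagged by clippy.
    ["postgres://", "postgresql://"]
        .iter()
        .find_map(|prefix| s.strip_prefix(*prefix))
}

fn is_keepalive_option(key: &str) -> bool {
    // `matches!` collapses a `match` whose arms only produce true/false.
    matches!(key, "keepalives" | "keepalives_idle")
}

fn main() {
    assert_eq!(remove_prefix("postgres://localhost"), Some("localhost"));
    assert_eq!(remove_prefix("mysql://localhost"), None);
    assert!(is_keepalive_option("keepalives"));
    assert!(!is_keepalive_option("host"));
}
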
From 49db9cd33c3e8deea195d43c2c0296baa48601bb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 18 Mar 2021 20:39:43 -0400 Subject: [PATCH 490/819] upgrade socket2 --- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/connect_socket.rs | 40 ++++++++++------------------ 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 254ebe62e..c03f87080 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -49,7 +49,7 @@ pin-project-lite = "0.2" phf = "0.8" postgres-protocol = { version = "0.6.0", path = "../postgres-protocol" } postgres-types = { version = "0.2.0", path = "../postgres-types" } -socket2 = "0.3" +socket2 = "0.4" tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.6", features = ["codec"] } diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 564677b05..474676908 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -1,17 +1,12 @@ use crate::config::Host; use crate::{Error, Socket}; -use socket2::{Domain, Protocol, Type}; +use socket2::{SockRef, TcpKeepalive}; use std::future::Future; use std::io; -use std::net::SocketAddr; -#[cfg(unix)] -use std::os::unix::io::{FromRawFd, IntoRawFd}; -#[cfg(windows)] -use std::os::windows::io::{FromRawSocket, IntoRawSocket}; use std::time::Duration; #[cfg(unix)] use tokio::net::UnixStream; -use tokio::net::{self, TcpSocket}; +use tokio::net::{self, TcpStream}; use tokio::time; pub(crate) async fn connect_socket( @@ -30,30 +25,23 @@ pub(crate) async fn connect_socket( let mut last_err = None; for addr in addrs { - let domain = match addr { - SocketAddr::V4(_) => Domain::ipv4(), - SocketAddr::V6(_) => Domain::ipv6(), - }; + let stream = + match connect_with_timeout(TcpStream::connect(addr), connect_timeout).await { + Ok(stream) => stream, + Err(e) => { + last_err = Some(e); + continue; + } + }; - let socket = socket2::Socket::new(domain, Type::stream(), Some(Protocol::tcp())) - .map_err(Error::connect)?; - socket.set_nonblocking(true).map_err(Error::connect)?; - socket.set_nodelay(true).map_err(Error::connect)?; + stream.set_nodelay(true).map_err(Error::connect)?; if keepalives { - socket - .set_keepalive(Some(keepalives_idle)) + SockRef::from(&stream) + .set_tcp_keepalive(&TcpKeepalive::new().with_time(keepalives_idle)) .map_err(Error::connect)?; } - #[cfg(unix)] - let socket = unsafe { TcpSocket::from_raw_fd(socket.into_raw_fd()) }; - #[cfg(windows)] - let socket = unsafe { TcpSocket::from_raw_socket(socket.into_raw_socket()) }; - - match connect_with_timeout(socket.connect(addr), connect_timeout).await { - Ok(socket) => return Ok(Socket::new_tcp(socket)), - Err(e) => last_err = Some(e), - } + return Ok(Socket::new_tcp(stream)); } Err(last_err.unwrap_or_else(|| { From 39736a6244244bd74456eb94dc0e87a38e542331 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 18 Mar 2021 20:43:58 -0400 Subject: [PATCH 491/819] bump ci version to 1.46 --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7191eb9e4..4a95dbe0c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,7 +59,7 @@ jobs: - uses: actions/checkout@v2 - uses: sfackler/actions/rustup@master with: - version: 1.45.0 + version: 1.46.0 - run: echo "::set-output name=version::$(rustc --version)" id: rust-version - uses: actions/cache@v1 From 
71cb6eef681ea6430cd723cd90fb45c90e6ac9d1 Mon Sep 17 00:00:00 2001 From: hansolshin Date: Sun, 28 Mar 2021 23:00:49 +0900 Subject: [PATCH 492/819] Add is_closed method to Error --- tokio-postgres/src/error/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index ee29a1dbb..47a31e793 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -421,6 +421,11 @@ impl Error { self.source().and_then(|e| e.downcast_ref::()) } + /// Determines if the error was associated with closed connection. + pub fn is_closed(&self) -> bool { + self.0.kind == Kind::Closed + } + /// Returns the SQLSTATE error code associated with the error. /// /// This is a convenience method that downcasts the cause to a `DbError` and returns its code. From af7825308d9808067dfa3af40571662d8c7a9592 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 28 Mar 2021 20:28:18 -0400 Subject: [PATCH 493/819] fix clippy --- tokio-postgres/tests/test/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index bf6d72d3e..e9439162b 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -577,7 +577,7 @@ async fn notices() { .unwrap(); let (tx, rx) = mpsc::unbounded(); - let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!(e)); + let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!("{}", e)); let connection = stream.forward(tx).map(|r| r.unwrap()); tokio::spawn(connection); @@ -612,7 +612,7 @@ async fn notifications() { let (client, mut connection) = connect_raw("user=postgres").await.unwrap(); let (tx, rx) = mpsc::unbounded(); - let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!(e)); + let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!("{}", e)); let connection = stream.forward(tx).map(|r| r.unwrap()); tokio::spawn(connection); From fc10985f9fdf0903893109bc951fb5891539bf97 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 28 Mar 2021 20:31:59 -0400 Subject: [PATCH 494/819] rustfmt --- tokio-postgres/tests/test/main.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index e9439162b..c367dbea3 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -577,7 +577,8 @@ async fn notices() { .unwrap(); let (tx, rx) = mpsc::unbounded(); - let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!("{}", e)); + let stream = + stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!("{}", e)); let connection = stream.forward(tx).map(|r| r.unwrap()); tokio::spawn(connection); @@ -612,7 +613,8 @@ async fn notifications() { let (client, mut connection) = connect_raw("user=postgres").await.unwrap(); let (tx, rx) = mpsc::unbounded(); - let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!("{}", e)); + let stream = + stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!("{}", e)); let connection = stream.forward(tx).map(|r| r.unwrap()); tokio::spawn(connection); From 558c169b8910dd6b6ab129995ab71fcd0d67fb66 Mon Sep 17 00:00:00 2001 From: Dmitriy Pleshevskiy Date: Thu, 1 Apr 2021 23:39:50 +0300 Subject: [PATCH 495/819] feat: add method to clear types cache 
Closes #753 --- tokio-postgres/src/client.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 359a7cd16..f11e5736b 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -113,6 +113,10 @@ impl InnerClient { self.state.lock().types.insert(oid, type_.clone()); } + pub fn clear_types(&self) { + self.state.lock().types.clear(); + } + pub fn with_buf(&self, f: F) -> R where F: FnOnce(&mut BytesMut) -> R, @@ -176,6 +180,11 @@ impl Client { &self.inner } + /// Clears the cache of database types (domain, enum, composition) that are loaded when preparing a query. + pub fn clear_types_cache(&self) { + self.inner().clear_types() + } + #[cfg(feature = "runtime")] pub(crate) fn set_socket_config(&mut self, socket_config: SocketConfig) { self.socket_config = Some(socket_config); From 844a27a074c9905832752b736073a4a68d616c7a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 3 Apr 2021 16:55:59 -0400 Subject: [PATCH 496/819] Add clear_type_cache to blocking client --- postgres/src/client.rs | 13 +++++++++++-- tokio-postgres/src/client.rs | 21 ++++++++++++--------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 0ff2376e1..29cac840d 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -501,8 +501,8 @@ impl Client { Notifications::new(self.connection.as_ref()) } - /// Constructs a cancellation token that can later be used to request - /// cancellation of a query running on this connection. + /// Constructs a cancellation token that can later be used to request cancellation of a query running on this + /// connection. /// /// # Examples /// @@ -541,6 +541,15 @@ impl Client { CancelToken::new(self.client.cancel_token()) } + /// Clears the client's type information cache. + /// + /// When user-defined types are used in a query, the client loads their definitions from the database and caches + /// them for the lifetime of the client. If those definitions are changed in the database, this method can be used + /// to flush the local cache and allow the new, updated definitions to be loaded. + pub fn clear_type_cache(&self) { + self.client.clear_type_cache(); + } + /// Determines if the client's connection has already closed. /// /// If this returns `true`, the client is no longer usable. diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index f11e5736b..f19005e55 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -113,7 +113,7 @@ impl InnerClient { self.state.lock().types.insert(oid, type_.clone()); } - pub fn clear_types(&self) { + pub fn clear_type_cache(&self) { self.state.lock().types.clear(); } @@ -180,11 +180,6 @@ impl Client { &self.inner } - /// Clears the cache of database types (domain, enum, composition) that are loaded when preparing a query. - pub fn clear_types_cache(&self) { - self.inner().clear_types() - } - #[cfg(feature = "runtime")] pub(crate) fn set_socket_config(&mut self, socket_config: SocketConfig) { self.socket_config = Some(socket_config); @@ -493,9 +488,8 @@ impl Client { TransactionBuilder::new(self) } - /// Constructs a cancellation token that can later be used to request - /// cancellation of a query running on the connection associated with - /// this client. + /// Constructs a cancellation token that can later be used to request cancellation of a query running on the + /// connection associated with this client. 
pub fn cancel_token(&self) -> CancelToken { CancelToken { #[cfg(feature = "runtime")] @@ -532,6 +526,15 @@ impl Client { self.cancel_token().cancel_query_raw(stream, tls).await } + /// Clears the client's type information cache. + /// + /// When user-defined types are used in a query, the client loads their definitions from the database and caches + /// them for the lifetime of the client. If those definitions are changed in the database, this method can be used + /// to flush the local cache and allow the new, updated definitions to be loaded. + pub fn clear_type_cache(&self) { + self.inner().clear_type_cache(); + } + /// Determines if the connection to the server has already closed. /// /// In that case, all future queries will fail. From a598c52906a432cd0670d8aba60a1dd174484ed8 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 3 Apr 2021 17:06:51 -0400 Subject: [PATCH 497/819] Release postgres-protocol v0.6.1 --- postgres-protocol/CHANGELOG.md | 11 +++++++++++ postgres-protocol/Cargo.toml | 2 +- postgres-protocol/src/lib.rs | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 4040c3ba2..7a51cb192 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,16 @@ # Change Log +## v0.6.1 - 2021-04-03 + +### Added + +* Added the `password` module, which can be used to hash passwords before using them in queries like `ALTER USER`. +* Added type conversions for `LSN`. + +### Changed + +* Moved from `md5` to `md-5`. + ## v0.6.0 - 2020-12-25 ### Changed diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 1224c633d..4fd288697 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.6.0" +version = "0.6.1" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index c9fe0ee19..8b6ff508d 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -9,7 +9,7 @@ //! //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. -#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.5")] +#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.6")] #![warn(missing_docs, rust_2018_idioms, clippy::all)] use byteorder::{BigEndian, ByteOrder}; From 32524569c2dd0be0bbeb6ed8fd82fe2f33d77a01 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 3 Apr 2021 17:10:03 -0400 Subject: [PATCH 498/819] Release postgres-types v0.2.1 --- postgres-types/CHANGELOG.md | 3 ++- postgres-types/Cargo.toml | 4 ++-- postgres-types/src/lib.rs | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 9f9da7989..83bc4d1fd 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,10 +1,11 @@ # Change Log -## Unreleased +## v0.2.1 - 2021-04-03 ### Added * Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. +* Added the `PgLsn` type, corresponding to `PG_LSN`. 
## v0.2.0 - 2020-12-25 diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 7d48596ae..1d7f2cc9a 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.2.0" +version = "0.2.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -24,7 +24,7 @@ with-time-0_2 = ["time-02"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -postgres-protocol = { version = "0.6.0", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index e9a7c10a2..5c483bd76 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -105,7 +105,7 @@ //! Happy, //! } //! ``` -#![doc(html_root_url = "https://docs.rs/postgres-types/0.1")] +#![doc(html_root_url = "https://docs.rs/postgres-types/0.2")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] use fallible_iterator::FallibleIterator; From 57164c7e59f31be016980e7874fd7b8a4c92d40a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 3 Apr 2021 17:12:50 -0400 Subject: [PATCH 499/819] Release tokio-postgres v0.7.1 --- tokio-postgres/CHANGELOG.md | 4 +++- tokio-postgres/Cargo.toml | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index db984ca18..7cc6c7071 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,10 +1,12 @@ # Change Log -## Unreleased +## v0.7.1 - 2020-04-03 ### Added * Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. +* Added `Client::clear_type_cache`. +* Added `Error::as_db_error` and `Error::is_closed`. ## v0.7.0 - 2020-12-25 diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index c03f87080..b1d093d4b 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.0" +version = "0.7.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -47,8 +47,8 @@ parking_lot = "0.11" percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.8" -postgres-protocol = { version = "0.6.0", path = "../postgres-protocol" } -postgres-types = { version = "0.2.0", path = "../postgres-types" } +postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } +postgres-types = { version = "0.2.1", path = "../postgres-types" } socket2 = "0.4" tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.6", features = ["codec"] } From 2ab49f6c564f73eba1404c060e4ef5520ed6c798 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 3 Apr 2021 17:14:25 -0400 Subject: [PATCH 500/819] Release postgres v0.19.1 --- postgres/CHANGELOG.md | 3 ++- postgres/Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 66e66c6fb..e68dedc5b 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,10 +1,11 @@ # Change Log -## Unreleased +## v0.19.1 - 2021-04-03 ### Added * Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. +* Added `Client::clear_type_cache`. 
## v0.19.0 - 2020-12-25 diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 0128f8a4e..18219782d 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.19.0" +version = "0.19.1" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -34,7 +34,7 @@ with-time-0_2 = ["tokio-postgres/with-time-0_2"] bytes = "1.0" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.7.0", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.1", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" From 50fa995cf9b0215a03585d358f27078086ed4bfc Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 5 Apr 2021 19:04:34 -0400 Subject: [PATCH 501/819] Reorder a bit --- tokio-postgres/src/generic_client.rs | 22 +++++++++++----------- tokio-postgres/src/transaction.rs | 10 +++++----- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 911a587b6..b2a907558 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -12,9 +12,6 @@ mod private { /// This trait is "sealed", and cannot be implemented outside of this crate. #[async_trait] pub trait GenericClient: private::Sealed { - /// Get a reference to the underlying `Client` - fn client(&self) -> &Client; - /// Like `Client::execute`. async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where @@ -71,16 +68,15 @@ pub trait GenericClient: private::Sealed { /// Like `Client::transaction`. async fn transaction(&mut self) -> Result, Error>; + + /// Returns a reference to the underlying `Client`. + fn client(&self) -> &Client; } impl private::Sealed for Client {} #[async_trait] impl GenericClient for Client { - fn client(&self) -> &Client { - self - } - async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -152,6 +148,10 @@ impl GenericClient for Client { async fn transaction(&mut self) -> Result, Error> { self.transaction().await } + + fn client(&self) -> &Client { + self + } } impl private::Sealed for Transaction<'_> {} @@ -159,10 +159,6 @@ impl private::Sealed for Transaction<'_> {} #[async_trait] #[allow(clippy::needless_lifetimes)] impl GenericClient for Transaction<'_> { - fn client(&self) -> &Client { - self.client() - } - async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -235,4 +231,8 @@ impl GenericClient for Transaction<'_> { async fn transaction<'a>(&'a mut self) -> Result, Error> { self.transaction().await } + + fn client(&self) -> &Client { + self.client() + } } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index cf39d9186..7fadce069 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -64,11 +64,6 @@ impl<'a> Transaction<'a> { } } - /// Get a reference to the underlying `Client` - pub fn client(&self) -> &Client { - &self.client - } - /// Consumes the transaction, committing all changes made within it. pub async fn commit(mut self) -> Result<(), Error> { self.done = true; @@ -311,4 +306,9 @@ impl<'a> Transaction<'a> { done: false, }) } + + /// Returns a reference to the underlying `Client`. 
+ pub fn client(&self) -> &Client { + &self.client + } } From c7a8adf2df589ee7add08f2aa5a56fcf3d5ee095 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 5 Apr 2021 19:07:27 -0400 Subject: [PATCH 502/819] remove unneeded ref --- tokio-postgres/src/transaction.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 7fadce069..a1aa7611f 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -309,6 +309,6 @@ impl<'a> Transaction<'a> { /// Returns a reference to the underlying `Client`. pub fn client(&self) -> &Client { - &self.client + self.client } } From 05a0643cc02e95a9c10ae7936e8941cbdb62e07f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 24 Apr 2021 10:19:23 -0400 Subject: [PATCH 503/819] Fix matching on SqlState Closes #756 --- codegen/src/sqlstate.rs | 68 +- tokio-postgres/src/error/sqlstate.rs | 1073 +++++++++++++++++++------- 2 files changed, 852 insertions(+), 289 deletions(-) diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index bb21be34f..ea3045654 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -1,5 +1,4 @@ use linked_hash_map::LinkedHashMap; -use phf_codegen; use std::fs::File; use std::io::{BufWriter, Write}; @@ -11,7 +10,9 @@ pub fn build() { let codes = parse_codes(); make_type(&mut file); + make_code(&codes, &mut file); make_consts(&codes, &mut file); + make_inner(&codes, &mut file); make_map(&codes, &mut file); } @@ -38,26 +39,51 @@ fn make_type(file: &mut BufWriter) { write!( file, "// Autogenerated file - DO NOT EDIT -use std::borrow::Cow; /// A SQLSTATE error code #[derive(PartialEq, Eq, Clone, Debug)] -pub struct SqlState(Cow<'static, str>); +pub struct SqlState(Inner); impl SqlState {{ /// Creates a `SqlState` from its error code. pub fn from_code(s: &str) -> SqlState {{ match SQLSTATE_MAP.get(s) {{ Some(state) => state.clone(), - None => SqlState(Cow::Owned(s.to_string())), + None => SqlState(Inner::Other(s.into())), }} }} +" + ) + .unwrap(); +} +fn make_code(codes: &LinkedHashMap>, file: &mut BufWriter) { + write!( + file, + r#" /// Returns the error code corresponding to the `SqlState`. 
pub fn code(&self) -> &str {{ - &self.0 + match &self.0 {{"#, + ) + .unwrap(); + + for code in codes.keys() { + write!( + file, + r#" + Inner::E{code} => "{code}","#, + code = code, + ) + .unwrap(); + } + + write!( + file, + r#" + Inner::Other(code) => code, + }} }} -" + "# ) .unwrap(); } @@ -69,7 +95,7 @@ fn make_consts(codes: &LinkedHashMap>, file: &mut BufWriter< file, r#" /// {code} - pub const {name}: SqlState = SqlState(Cow::Borrowed("{code}")); + pub const {name}: SqlState = SqlState(Inner::E{code}); "#, name = name, code = code, @@ -81,6 +107,34 @@ fn make_consts(codes: &LinkedHashMap>, file: &mut BufWriter< write!(file, "}}").unwrap(); } +fn make_inner(codes: &LinkedHashMap>, file: &mut BufWriter) { + write!( + file, + r#" + +#[derive(PartialEq, Eq, Clone, Debug)] +enum Inner {{"#, + ) + .unwrap(); + for code in codes.keys() { + write!( + file, + r#" + E{},"#, + code, + ) + .unwrap(); + } + write!( + file, + r#" + Other(Box), +}} + "#, + ) + .unwrap(); +} + fn make_map(codes: &LinkedHashMap>, file: &mut BufWriter) { let mut builder = phf_codegen::Map::new(); for (code, names) in codes { diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 3a6ea0bdc..6505b51ce 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -1,832 +1,1341 @@ // Autogenerated file - DO NOT EDIT -use std::borrow::Cow; /// A SQLSTATE error code #[derive(PartialEq, Eq, Clone, Debug)] -pub struct SqlState(Cow<'static, str>); +pub struct SqlState(Inner); impl SqlState { /// Creates a `SqlState` from its error code. pub fn from_code(s: &str) -> SqlState { match SQLSTATE_MAP.get(s) { Some(state) => state.clone(), - None => SqlState(Cow::Owned(s.to_string())), + None => SqlState(Inner::Other(s.into())), } } /// Returns the error code corresponding to the `SqlState`. 
pub fn code(&self) -> &str { - &self.0 + match &self.0 { + Inner::E00000 => "00000", + Inner::E01000 => "01000", + Inner::E0100C => "0100C", + Inner::E01008 => "01008", + Inner::E01003 => "01003", + Inner::E01007 => "01007", + Inner::E01006 => "01006", + Inner::E01004 => "01004", + Inner::E01P01 => "01P01", + Inner::E02000 => "02000", + Inner::E02001 => "02001", + Inner::E03000 => "03000", + Inner::E08000 => "08000", + Inner::E08003 => "08003", + Inner::E08006 => "08006", + Inner::E08001 => "08001", + Inner::E08004 => "08004", + Inner::E08007 => "08007", + Inner::E08P01 => "08P01", + Inner::E09000 => "09000", + Inner::E0A000 => "0A000", + Inner::E0B000 => "0B000", + Inner::E0F000 => "0F000", + Inner::E0F001 => "0F001", + Inner::E0L000 => "0L000", + Inner::E0LP01 => "0LP01", + Inner::E0P000 => "0P000", + Inner::E0Z000 => "0Z000", + Inner::E0Z002 => "0Z002", + Inner::E20000 => "20000", + Inner::E21000 => "21000", + Inner::E22000 => "22000", + Inner::E2202E => "2202E", + Inner::E22021 => "22021", + Inner::E22008 => "22008", + Inner::E22012 => "22012", + Inner::E22005 => "22005", + Inner::E2200B => "2200B", + Inner::E22022 => "22022", + Inner::E22015 => "22015", + Inner::E2201E => "2201E", + Inner::E22014 => "22014", + Inner::E22016 => "22016", + Inner::E2201F => "2201F", + Inner::E2201G => "2201G", + Inner::E22018 => "22018", + Inner::E22007 => "22007", + Inner::E22019 => "22019", + Inner::E2200D => "2200D", + Inner::E22025 => "22025", + Inner::E22P06 => "22P06", + Inner::E22010 => "22010", + Inner::E22023 => "22023", + Inner::E22013 => "22013", + Inner::E2201B => "2201B", + Inner::E2201W => "2201W", + Inner::E2201X => "2201X", + Inner::E2202H => "2202H", + Inner::E2202G => "2202G", + Inner::E22009 => "22009", + Inner::E2200C => "2200C", + Inner::E2200G => "2200G", + Inner::E22004 => "22004", + Inner::E22002 => "22002", + Inner::E22003 => "22003", + Inner::E2200H => "2200H", + Inner::E22026 => "22026", + Inner::E22001 => "22001", + Inner::E22011 => "22011", + Inner::E22027 => "22027", + Inner::E22024 => "22024", + Inner::E2200F => "2200F", + Inner::E22P01 => "22P01", + Inner::E22P02 => "22P02", + Inner::E22P03 => "22P03", + Inner::E22P04 => "22P04", + Inner::E22P05 => "22P05", + Inner::E2200L => "2200L", + Inner::E2200M => "2200M", + Inner::E2200N => "2200N", + Inner::E2200S => "2200S", + Inner::E2200T => "2200T", + Inner::E22030 => "22030", + Inner::E22031 => "22031", + Inner::E22032 => "22032", + Inner::E22033 => "22033", + Inner::E22034 => "22034", + Inner::E22035 => "22035", + Inner::E22036 => "22036", + Inner::E22037 => "22037", + Inner::E22038 => "22038", + Inner::E22039 => "22039", + Inner::E2203A => "2203A", + Inner::E2203B => "2203B", + Inner::E2203C => "2203C", + Inner::E2203D => "2203D", + Inner::E2203E => "2203E", + Inner::E2203F => "2203F", + Inner::E23000 => "23000", + Inner::E23001 => "23001", + Inner::E23502 => "23502", + Inner::E23503 => "23503", + Inner::E23505 => "23505", + Inner::E23514 => "23514", + Inner::E23P01 => "23P01", + Inner::E24000 => "24000", + Inner::E25000 => "25000", + Inner::E25001 => "25001", + Inner::E25002 => "25002", + Inner::E25008 => "25008", + Inner::E25003 => "25003", + Inner::E25004 => "25004", + Inner::E25005 => "25005", + Inner::E25006 => "25006", + Inner::E25007 => "25007", + Inner::E25P01 => "25P01", + Inner::E25P02 => "25P02", + Inner::E25P03 => "25P03", + Inner::E26000 => "26000", + Inner::E27000 => "27000", + Inner::E28000 => "28000", + Inner::E28P01 => "28P01", + Inner::E2B000 => "2B000", + Inner::E2BP01 => "2BP01", + Inner::E2D000 => 
"2D000", + Inner::E2F000 => "2F000", + Inner::E2F005 => "2F005", + Inner::E2F002 => "2F002", + Inner::E2F003 => "2F003", + Inner::E2F004 => "2F004", + Inner::E34000 => "34000", + Inner::E38000 => "38000", + Inner::E38001 => "38001", + Inner::E38002 => "38002", + Inner::E38003 => "38003", + Inner::E38004 => "38004", + Inner::E39000 => "39000", + Inner::E39001 => "39001", + Inner::E39004 => "39004", + Inner::E39P01 => "39P01", + Inner::E39P02 => "39P02", + Inner::E39P03 => "39P03", + Inner::E3B000 => "3B000", + Inner::E3B001 => "3B001", + Inner::E3D000 => "3D000", + Inner::E3F000 => "3F000", + Inner::E40000 => "40000", + Inner::E40002 => "40002", + Inner::E40001 => "40001", + Inner::E40003 => "40003", + Inner::E40P01 => "40P01", + Inner::E42000 => "42000", + Inner::E42601 => "42601", + Inner::E42501 => "42501", + Inner::E42846 => "42846", + Inner::E42803 => "42803", + Inner::E42P20 => "42P20", + Inner::E42P19 => "42P19", + Inner::E42830 => "42830", + Inner::E42602 => "42602", + Inner::E42622 => "42622", + Inner::E42939 => "42939", + Inner::E42804 => "42804", + Inner::E42P18 => "42P18", + Inner::E42P21 => "42P21", + Inner::E42P22 => "42P22", + Inner::E42809 => "42809", + Inner::E428C9 => "428C9", + Inner::E42703 => "42703", + Inner::E42883 => "42883", + Inner::E42P01 => "42P01", + Inner::E42P02 => "42P02", + Inner::E42704 => "42704", + Inner::E42701 => "42701", + Inner::E42P03 => "42P03", + Inner::E42P04 => "42P04", + Inner::E42723 => "42723", + Inner::E42P05 => "42P05", + Inner::E42P06 => "42P06", + Inner::E42P07 => "42P07", + Inner::E42712 => "42712", + Inner::E42710 => "42710", + Inner::E42702 => "42702", + Inner::E42725 => "42725", + Inner::E42P08 => "42P08", + Inner::E42P09 => "42P09", + Inner::E42P10 => "42P10", + Inner::E42611 => "42611", + Inner::E42P11 => "42P11", + Inner::E42P12 => "42P12", + Inner::E42P13 => "42P13", + Inner::E42P14 => "42P14", + Inner::E42P15 => "42P15", + Inner::E42P16 => "42P16", + Inner::E42P17 => "42P17", + Inner::E44000 => "44000", + Inner::E53000 => "53000", + Inner::E53100 => "53100", + Inner::E53200 => "53200", + Inner::E53300 => "53300", + Inner::E53400 => "53400", + Inner::E54000 => "54000", + Inner::E54001 => "54001", + Inner::E54011 => "54011", + Inner::E54023 => "54023", + Inner::E55000 => "55000", + Inner::E55006 => "55006", + Inner::E55P02 => "55P02", + Inner::E55P03 => "55P03", + Inner::E55P04 => "55P04", + Inner::E57000 => "57000", + Inner::E57014 => "57014", + Inner::E57P01 => "57P01", + Inner::E57P02 => "57P02", + Inner::E57P03 => "57P03", + Inner::E57P04 => "57P04", + Inner::E58000 => "58000", + Inner::E58030 => "58030", + Inner::E58P01 => "58P01", + Inner::E58P02 => "58P02", + Inner::E72000 => "72000", + Inner::EF0000 => "F0000", + Inner::EF0001 => "F0001", + Inner::EHV000 => "HV000", + Inner::EHV005 => "HV005", + Inner::EHV002 => "HV002", + Inner::EHV010 => "HV010", + Inner::EHV021 => "HV021", + Inner::EHV024 => "HV024", + Inner::EHV007 => "HV007", + Inner::EHV008 => "HV008", + Inner::EHV004 => "HV004", + Inner::EHV006 => "HV006", + Inner::EHV091 => "HV091", + Inner::EHV00B => "HV00B", + Inner::EHV00C => "HV00C", + Inner::EHV00D => "HV00D", + Inner::EHV090 => "HV090", + Inner::EHV00A => "HV00A", + Inner::EHV009 => "HV009", + Inner::EHV014 => "HV014", + Inner::EHV001 => "HV001", + Inner::EHV00P => "HV00P", + Inner::EHV00J => "HV00J", + Inner::EHV00K => "HV00K", + Inner::EHV00Q => "HV00Q", + Inner::EHV00R => "HV00R", + Inner::EHV00L => "HV00L", + Inner::EHV00M => "HV00M", + Inner::EHV00N => "HV00N", + Inner::EP0000 => "P0000", + Inner::EP0001 
=> "P0001", + Inner::EP0002 => "P0002", + Inner::EP0003 => "P0003", + Inner::EP0004 => "P0004", + Inner::EXX000 => "XX000", + Inner::EXX001 => "XX001", + Inner::EXX002 => "XX002", + Inner::Other(code) => code, + } } /// 00000 - pub const SUCCESSFUL_COMPLETION: SqlState = SqlState(Cow::Borrowed("00000")); + pub const SUCCESSFUL_COMPLETION: SqlState = SqlState(Inner::E00000); /// 01000 - pub const WARNING: SqlState = SqlState(Cow::Borrowed("01000")); + pub const WARNING: SqlState = SqlState(Inner::E01000); /// 0100C - pub const WARNING_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Cow::Borrowed("0100C")); + pub const WARNING_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Inner::E0100C); /// 01008 - pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Cow::Borrowed("01008")); + pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Inner::E01008); /// 01003 - pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = - SqlState(Cow::Borrowed("01003")); + pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = SqlState(Inner::E01003); /// 01007 - pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Cow::Borrowed("01007")); + pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Inner::E01007); /// 01006 - pub const WARNING_PRIVILEGE_NOT_REVOKED: SqlState = SqlState(Cow::Borrowed("01006")); + pub const WARNING_PRIVILEGE_NOT_REVOKED: SqlState = SqlState(Inner::E01006); /// 01004 - pub const WARNING_STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Cow::Borrowed("01004")); + pub const WARNING_STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Inner::E01004); /// 01P01 - pub const WARNING_DEPRECATED_FEATURE: SqlState = SqlState(Cow::Borrowed("01P01")); + pub const WARNING_DEPRECATED_FEATURE: SqlState = SqlState(Inner::E01P01); /// 02000 - pub const NO_DATA: SqlState = SqlState(Cow::Borrowed("02000")); + pub const NO_DATA: SqlState = SqlState(Inner::E02000); /// 02001 - pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = - SqlState(Cow::Borrowed("02001")); + pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Inner::E02001); /// 03000 - pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Cow::Borrowed("03000")); + pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Inner::E03000); /// 08000 - pub const CONNECTION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("08000")); + pub const CONNECTION_EXCEPTION: SqlState = SqlState(Inner::E08000); /// 08003 - pub const CONNECTION_DOES_NOT_EXIST: SqlState = SqlState(Cow::Borrowed("08003")); + pub const CONNECTION_DOES_NOT_EXIST: SqlState = SqlState(Inner::E08003); /// 08006 - pub const CONNECTION_FAILURE: SqlState = SqlState(Cow::Borrowed("08006")); + pub const CONNECTION_FAILURE: SqlState = SqlState(Inner::E08006); /// 08001 - pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = - SqlState(Cow::Borrowed("08001")); + pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = SqlState(Inner::E08001); /// 08004 - pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = - SqlState(Cow::Borrowed("08004")); + pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = SqlState(Inner::E08004); /// 08007 - pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("08007")); + pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Inner::E08007); /// 08P01 - pub const PROTOCOL_VIOLATION: SqlState = SqlState(Cow::Borrowed("08P01")); + pub const PROTOCOL_VIOLATION: SqlState = 
SqlState(Inner::E08P01); /// 09000 - pub const TRIGGERED_ACTION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("09000")); + pub const TRIGGERED_ACTION_EXCEPTION: SqlState = SqlState(Inner::E09000); /// 0A000 - pub const FEATURE_NOT_SUPPORTED: SqlState = SqlState(Cow::Borrowed("0A000")); + pub const FEATURE_NOT_SUPPORTED: SqlState = SqlState(Inner::E0A000); /// 0B000 - pub const INVALID_TRANSACTION_INITIATION: SqlState = SqlState(Cow::Borrowed("0B000")); + pub const INVALID_TRANSACTION_INITIATION: SqlState = SqlState(Inner::E0B000); /// 0F000 - pub const LOCATOR_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0F000")); + pub const LOCATOR_EXCEPTION: SqlState = SqlState(Inner::E0F000); /// 0F001 - pub const L_E_INVALID_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("0F001")); + pub const L_E_INVALID_SPECIFICATION: SqlState = SqlState(Inner::E0F001); /// 0L000 - pub const INVALID_GRANTOR: SqlState = SqlState(Cow::Borrowed("0L000")); + pub const INVALID_GRANTOR: SqlState = SqlState(Inner::E0L000); /// 0LP01 - pub const INVALID_GRANT_OPERATION: SqlState = SqlState(Cow::Borrowed("0LP01")); + pub const INVALID_GRANT_OPERATION: SqlState = SqlState(Inner::E0LP01); /// 0P000 - pub const INVALID_ROLE_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("0P000")); + pub const INVALID_ROLE_SPECIFICATION: SqlState = SqlState(Inner::E0P000); /// 0Z000 - pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0Z000")); + pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Inner::E0Z000); /// 0Z002 pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = - SqlState(Cow::Borrowed("0Z002")); + SqlState(Inner::E0Z002); /// 20000 - pub const CASE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("20000")); + pub const CASE_NOT_FOUND: SqlState = SqlState(Inner::E20000); /// 21000 - pub const CARDINALITY_VIOLATION: SqlState = SqlState(Cow::Borrowed("21000")); + pub const CARDINALITY_VIOLATION: SqlState = SqlState(Inner::E21000); /// 22000 - pub const DATA_EXCEPTION: SqlState = SqlState(Cow::Borrowed("22000")); + pub const DATA_EXCEPTION: SqlState = SqlState(Inner::E22000); /// 2202E - pub const ARRAY_ELEMENT_ERROR: SqlState = SqlState(Cow::Borrowed("2202E")); + pub const ARRAY_ELEMENT_ERROR: SqlState = SqlState(Inner::E2202E); /// 2202E - pub const ARRAY_SUBSCRIPT_ERROR: SqlState = SqlState(Cow::Borrowed("2202E")); + pub const ARRAY_SUBSCRIPT_ERROR: SqlState = SqlState(Inner::E2202E); /// 22021 - pub const CHARACTER_NOT_IN_REPERTOIRE: SqlState = SqlState(Cow::Borrowed("22021")); + pub const CHARACTER_NOT_IN_REPERTOIRE: SqlState = SqlState(Inner::E22021); /// 22008 - pub const DATETIME_FIELD_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22008")); + pub const DATETIME_FIELD_OVERFLOW: SqlState = SqlState(Inner::E22008); /// 22008 - pub const DATETIME_VALUE_OUT_OF_RANGE: SqlState = SqlState(Cow::Borrowed("22008")); + pub const DATETIME_VALUE_OUT_OF_RANGE: SqlState = SqlState(Inner::E22008); /// 22012 - pub const DIVISION_BY_ZERO: SqlState = SqlState(Cow::Borrowed("22012")); + pub const DIVISION_BY_ZERO: SqlState = SqlState(Inner::E22012); /// 22005 - pub const ERROR_IN_ASSIGNMENT: SqlState = SqlState(Cow::Borrowed("22005")); + pub const ERROR_IN_ASSIGNMENT: SqlState = SqlState(Inner::E22005); /// 2200B - pub const ESCAPE_CHARACTER_CONFLICT: SqlState = SqlState(Cow::Borrowed("2200B")); + pub const ESCAPE_CHARACTER_CONFLICT: SqlState = SqlState(Inner::E2200B); /// 22022 - pub const INDICATOR_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22022")); + pub const INDICATOR_OVERFLOW: SqlState = 
SqlState(Inner::E22022); /// 22015 - pub const INTERVAL_FIELD_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22015")); + pub const INTERVAL_FIELD_OVERFLOW: SqlState = SqlState(Inner::E22015); /// 2201E - pub const INVALID_ARGUMENT_FOR_LOG: SqlState = SqlState(Cow::Borrowed("2201E")); + pub const INVALID_ARGUMENT_FOR_LOG: SqlState = SqlState(Inner::E2201E); /// 22014 - pub const INVALID_ARGUMENT_FOR_NTILE: SqlState = SqlState(Cow::Borrowed("22014")); + pub const INVALID_ARGUMENT_FOR_NTILE: SqlState = SqlState(Inner::E22014); /// 22016 - pub const INVALID_ARGUMENT_FOR_NTH_VALUE: SqlState = SqlState(Cow::Borrowed("22016")); + pub const INVALID_ARGUMENT_FOR_NTH_VALUE: SqlState = SqlState(Inner::E22016); /// 2201F - pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201F")); + pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Inner::E2201F); /// 2201G - pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = - SqlState(Cow::Borrowed("2201G")); + pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = SqlState(Inner::E2201G); /// 22018 - pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Cow::Borrowed("22018")); + pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Inner::E22018); /// 22007 - pub const INVALID_DATETIME_FORMAT: SqlState = SqlState(Cow::Borrowed("22007")); + pub const INVALID_DATETIME_FORMAT: SqlState = SqlState(Inner::E22007); /// 22019 - pub const INVALID_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22019")); + pub const INVALID_ESCAPE_CHARACTER: SqlState = SqlState(Inner::E22019); /// 2200D - pub const INVALID_ESCAPE_OCTET: SqlState = SqlState(Cow::Borrowed("2200D")); + pub const INVALID_ESCAPE_OCTET: SqlState = SqlState(Inner::E2200D); /// 22025 - pub const INVALID_ESCAPE_SEQUENCE: SqlState = SqlState(Cow::Borrowed("22025")); + pub const INVALID_ESCAPE_SEQUENCE: SqlState = SqlState(Inner::E22025); /// 22P06 - pub const NONSTANDARD_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22P06")); + pub const NONSTANDARD_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Inner::E22P06); /// 22010 - pub const INVALID_INDICATOR_PARAMETER_VALUE: SqlState = SqlState(Cow::Borrowed("22010")); + pub const INVALID_INDICATOR_PARAMETER_VALUE: SqlState = SqlState(Inner::E22010); /// 22023 - pub const INVALID_PARAMETER_VALUE: SqlState = SqlState(Cow::Borrowed("22023")); + pub const INVALID_PARAMETER_VALUE: SqlState = SqlState(Inner::E22023); /// 22013 - pub const INVALID_PRECEDING_OR_FOLLOWING_SIZE: SqlState = SqlState(Cow::Borrowed("22013")); + pub const INVALID_PRECEDING_OR_FOLLOWING_SIZE: SqlState = SqlState(Inner::E22013); /// 2201B - pub const INVALID_REGULAR_EXPRESSION: SqlState = SqlState(Cow::Borrowed("2201B")); + pub const INVALID_REGULAR_EXPRESSION: SqlState = SqlState(Inner::E2201B); /// 2201W - pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201W")); + pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Inner::E2201W); /// 2201X - pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = - SqlState(Cow::Borrowed("2201X")); + pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = SqlState(Inner::E2201X); /// 2202H - pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = SqlState(Cow::Borrowed("2202H")); + pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = SqlState(Inner::E2202H); /// 2202G - pub const INVALID_TABLESAMPLE_REPEAT: SqlState = SqlState(Cow::Borrowed("2202G")); + pub const INVALID_TABLESAMPLE_REPEAT: SqlState = 
SqlState(Inner::E2202G); /// 22009 - pub const INVALID_TIME_ZONE_DISPLACEMENT_VALUE: SqlState = SqlState(Cow::Borrowed("22009")); + pub const INVALID_TIME_ZONE_DISPLACEMENT_VALUE: SqlState = SqlState(Inner::E22009); /// 2200C - pub const INVALID_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("2200C")); + pub const INVALID_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Inner::E2200C); /// 2200G - pub const MOST_SPECIFIC_TYPE_MISMATCH: SqlState = SqlState(Cow::Borrowed("2200G")); + pub const MOST_SPECIFIC_TYPE_MISMATCH: SqlState = SqlState(Inner::E2200G); /// 22004 - pub const NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Cow::Borrowed("22004")); + pub const NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Inner::E22004); /// 22002 - pub const NULL_VALUE_NO_INDICATOR_PARAMETER: SqlState = SqlState(Cow::Borrowed("22002")); + pub const NULL_VALUE_NO_INDICATOR_PARAMETER: SqlState = SqlState(Inner::E22002); /// 22003 - pub const NUMERIC_VALUE_OUT_OF_RANGE: SqlState = SqlState(Cow::Borrowed("22003")); + pub const NUMERIC_VALUE_OUT_OF_RANGE: SqlState = SqlState(Inner::E22003); /// 2200H - pub const SEQUENCE_GENERATOR_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("2200H")); + pub const SEQUENCE_GENERATOR_LIMIT_EXCEEDED: SqlState = SqlState(Inner::E2200H); /// 22026 - pub const STRING_DATA_LENGTH_MISMATCH: SqlState = SqlState(Cow::Borrowed("22026")); + pub const STRING_DATA_LENGTH_MISMATCH: SqlState = SqlState(Inner::E22026); /// 22001 - pub const STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Cow::Borrowed("22001")); + pub const STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Inner::E22001); /// 22011 - pub const SUBSTRING_ERROR: SqlState = SqlState(Cow::Borrowed("22011")); + pub const SUBSTRING_ERROR: SqlState = SqlState(Inner::E22011); /// 22027 - pub const TRIM_ERROR: SqlState = SqlState(Cow::Borrowed("22027")); + pub const TRIM_ERROR: SqlState = SqlState(Inner::E22027); /// 22024 - pub const UNTERMINATED_C_STRING: SqlState = SqlState(Cow::Borrowed("22024")); + pub const UNTERMINATED_C_STRING: SqlState = SqlState(Inner::E22024); /// 2200F - pub const ZERO_LENGTH_CHARACTER_STRING: SqlState = SqlState(Cow::Borrowed("2200F")); + pub const ZERO_LENGTH_CHARACTER_STRING: SqlState = SqlState(Inner::E2200F); /// 22P01 - pub const FLOATING_POINT_EXCEPTION: SqlState = SqlState(Cow::Borrowed("22P01")); + pub const FLOATING_POINT_EXCEPTION: SqlState = SqlState(Inner::E22P01); /// 22P02 - pub const INVALID_TEXT_REPRESENTATION: SqlState = SqlState(Cow::Borrowed("22P02")); + pub const INVALID_TEXT_REPRESENTATION: SqlState = SqlState(Inner::E22P02); /// 22P03 - pub const INVALID_BINARY_REPRESENTATION: SqlState = SqlState(Cow::Borrowed("22P03")); + pub const INVALID_BINARY_REPRESENTATION: SqlState = SqlState(Inner::E22P03); /// 22P04 - pub const BAD_COPY_FILE_FORMAT: SqlState = SqlState(Cow::Borrowed("22P04")); + pub const BAD_COPY_FILE_FORMAT: SqlState = SqlState(Inner::E22P04); /// 22P05 - pub const UNTRANSLATABLE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22P05")); + pub const UNTRANSLATABLE_CHARACTER: SqlState = SqlState(Inner::E22P05); /// 2200L - pub const NOT_AN_XML_DOCUMENT: SqlState = SqlState(Cow::Borrowed("2200L")); + pub const NOT_AN_XML_DOCUMENT: SqlState = SqlState(Inner::E2200L); /// 2200M - pub const INVALID_XML_DOCUMENT: SqlState = SqlState(Cow::Borrowed("2200M")); + pub const INVALID_XML_DOCUMENT: SqlState = SqlState(Inner::E2200M); /// 2200N - pub const INVALID_XML_CONTENT: SqlState = SqlState(Cow::Borrowed("2200N")); + pub const INVALID_XML_CONTENT: SqlState = 
SqlState(Inner::E2200N); /// 2200S - pub const INVALID_XML_COMMENT: SqlState = SqlState(Cow::Borrowed("2200S")); + pub const INVALID_XML_COMMENT: SqlState = SqlState(Inner::E2200S); /// 2200T - pub const INVALID_XML_PROCESSING_INSTRUCTION: SqlState = SqlState(Cow::Borrowed("2200T")); + pub const INVALID_XML_PROCESSING_INSTRUCTION: SqlState = SqlState(Inner::E2200T); /// 22030 - pub const DUPLICATE_JSON_OBJECT_KEY_VALUE: SqlState = SqlState(Cow::Borrowed("22030")); + pub const DUPLICATE_JSON_OBJECT_KEY_VALUE: SqlState = SqlState(Inner::E22030); /// 22031 - pub const INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION: SqlState = - SqlState(Cow::Borrowed("22031")); + pub const INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION: SqlState = SqlState(Inner::E22031); /// 22032 - pub const INVALID_JSON_TEXT: SqlState = SqlState(Cow::Borrowed("22032")); + pub const INVALID_JSON_TEXT: SqlState = SqlState(Inner::E22032); /// 22033 - pub const INVALID_SQL_JSON_SUBSCRIPT: SqlState = SqlState(Cow::Borrowed("22033")); + pub const INVALID_SQL_JSON_SUBSCRIPT: SqlState = SqlState(Inner::E22033); /// 22034 - pub const MORE_THAN_ONE_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22034")); + pub const MORE_THAN_ONE_SQL_JSON_ITEM: SqlState = SqlState(Inner::E22034); /// 22035 - pub const NO_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22035")); + pub const NO_SQL_JSON_ITEM: SqlState = SqlState(Inner::E22035); /// 22036 - pub const NON_NUMERIC_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22036")); + pub const NON_NUMERIC_SQL_JSON_ITEM: SqlState = SqlState(Inner::E22036); /// 22037 - pub const NON_UNIQUE_KEYS_IN_A_JSON_OBJECT: SqlState = SqlState(Cow::Borrowed("22037")); + pub const NON_UNIQUE_KEYS_IN_A_JSON_OBJECT: SqlState = SqlState(Inner::E22037); /// 22038 - pub const SINGLETON_SQL_JSON_ITEM_REQUIRED: SqlState = SqlState(Cow::Borrowed("22038")); + pub const SINGLETON_SQL_JSON_ITEM_REQUIRED: SqlState = SqlState(Inner::E22038); /// 22039 - pub const SQL_JSON_ARRAY_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("22039")); + pub const SQL_JSON_ARRAY_NOT_FOUND: SqlState = SqlState(Inner::E22039); /// 2203A - pub const SQL_JSON_MEMBER_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("2203A")); + pub const SQL_JSON_MEMBER_NOT_FOUND: SqlState = SqlState(Inner::E2203A); /// 2203B - pub const SQL_JSON_NUMBER_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("2203B")); + pub const SQL_JSON_NUMBER_NOT_FOUND: SqlState = SqlState(Inner::E2203B); /// 2203C - pub const SQL_JSON_OBJECT_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("2203C")); + pub const SQL_JSON_OBJECT_NOT_FOUND: SqlState = SqlState(Inner::E2203C); /// 2203D - pub const TOO_MANY_JSON_ARRAY_ELEMENTS: SqlState = SqlState(Cow::Borrowed("2203D")); + pub const TOO_MANY_JSON_ARRAY_ELEMENTS: SqlState = SqlState(Inner::E2203D); /// 2203E - pub const TOO_MANY_JSON_OBJECT_MEMBERS: SqlState = SqlState(Cow::Borrowed("2203E")); + pub const TOO_MANY_JSON_OBJECT_MEMBERS: SqlState = SqlState(Inner::E2203E); /// 2203F - pub const SQL_JSON_SCALAR_REQUIRED: SqlState = SqlState(Cow::Borrowed("2203F")); + pub const SQL_JSON_SCALAR_REQUIRED: SqlState = SqlState(Inner::E2203F); /// 23000 - pub const INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Cow::Borrowed("23000")); + pub const INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Inner::E23000); /// 23001 - pub const RESTRICT_VIOLATION: SqlState = SqlState(Cow::Borrowed("23001")); + pub const RESTRICT_VIOLATION: SqlState = SqlState(Inner::E23001); /// 23502 - pub const NOT_NULL_VIOLATION: SqlState = 
SqlState(Cow::Borrowed("23502")); + pub const NOT_NULL_VIOLATION: SqlState = SqlState(Inner::E23502); /// 23503 - pub const FOREIGN_KEY_VIOLATION: SqlState = SqlState(Cow::Borrowed("23503")); + pub const FOREIGN_KEY_VIOLATION: SqlState = SqlState(Inner::E23503); /// 23505 - pub const UNIQUE_VIOLATION: SqlState = SqlState(Cow::Borrowed("23505")); + pub const UNIQUE_VIOLATION: SqlState = SqlState(Inner::E23505); /// 23514 - pub const CHECK_VIOLATION: SqlState = SqlState(Cow::Borrowed("23514")); + pub const CHECK_VIOLATION: SqlState = SqlState(Inner::E23514); /// 23P01 - pub const EXCLUSION_VIOLATION: SqlState = SqlState(Cow::Borrowed("23P01")); + pub const EXCLUSION_VIOLATION: SqlState = SqlState(Inner::E23P01); /// 24000 - pub const INVALID_CURSOR_STATE: SqlState = SqlState(Cow::Borrowed("24000")); + pub const INVALID_CURSOR_STATE: SqlState = SqlState(Inner::E24000); /// 25000 - pub const INVALID_TRANSACTION_STATE: SqlState = SqlState(Cow::Borrowed("25000")); + pub const INVALID_TRANSACTION_STATE: SqlState = SqlState(Inner::E25000); /// 25001 - pub const ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25001")); + pub const ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Inner::E25001); /// 25002 - pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Cow::Borrowed("25002")); + pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Inner::E25002); /// 25008 - pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = - SqlState(Cow::Borrowed("25008")); + pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = SqlState(Inner::E25008); /// 25003 - pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25003")); + pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25003); /// 25004 pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25004")); + SqlState(Inner::E25004); /// 25005 - pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25005")); + pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25005); /// 25006 - pub const READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25006")); + pub const READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Inner::E25006); /// 25007 - pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = - SqlState(Cow::Borrowed("25007")); + pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = SqlState(Inner::E25007); /// 25P01 - pub const NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P01")); + pub const NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Inner::E25P01); /// 25P02 - pub const IN_FAILED_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P02")); + pub const IN_FAILED_SQL_TRANSACTION: SqlState = SqlState(Inner::E25P02); /// 25P03 - pub const IDLE_IN_TRANSACTION_SESSION_TIMEOUT: SqlState = SqlState(Cow::Borrowed("25P03")); + pub const IDLE_IN_TRANSACTION_SESSION_TIMEOUT: SqlState = SqlState(Inner::E25P03); /// 26000 - pub const INVALID_SQL_STATEMENT_NAME: SqlState = SqlState(Cow::Borrowed("26000")); + pub const INVALID_SQL_STATEMENT_NAME: SqlState = SqlState(Inner::E26000); /// 26000 - pub const UNDEFINED_PSTATEMENT: SqlState = SqlState(Cow::Borrowed("26000")); + pub const UNDEFINED_PSTATEMENT: SqlState = SqlState(Inner::E26000); /// 27000 - pub const TRIGGERED_DATA_CHANGE_VIOLATION: SqlState = SqlState(Cow::Borrowed("27000")); + pub const 
TRIGGERED_DATA_CHANGE_VIOLATION: SqlState = SqlState(Inner::E27000); /// 28000 - pub const INVALID_AUTHORIZATION_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("28000")); + pub const INVALID_AUTHORIZATION_SPECIFICATION: SqlState = SqlState(Inner::E28000); /// 28P01 - pub const INVALID_PASSWORD: SqlState = SqlState(Cow::Borrowed("28P01")); + pub const INVALID_PASSWORD: SqlState = SqlState(Inner::E28P01); /// 2B000 - pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = - SqlState(Cow::Borrowed("2B000")); + pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = SqlState(Inner::E2B000); /// 2BP01 - pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2BP01")); + pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = SqlState(Inner::E2BP01); /// 2D000 - pub const INVALID_TRANSACTION_TERMINATION: SqlState = SqlState(Cow::Borrowed("2D000")); + pub const INVALID_TRANSACTION_TERMINATION: SqlState = SqlState(Inner::E2D000); /// 2F000 - pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("2F000")); + pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Inner::E2F000); /// 2F005 - pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = - SqlState(Cow::Borrowed("2F005")); + pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = SqlState(Inner::E2F005); /// 2F002 - pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F002")); + pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Inner::E2F002); /// 2F003 - pub const S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Cow::Borrowed("2F003")); + pub const S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Inner::E2F003); /// 2F004 - pub const S_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F004")); + pub const S_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Inner::E2F004); /// 34000 - pub const INVALID_CURSOR_NAME: SqlState = SqlState(Cow::Borrowed("34000")); + pub const INVALID_CURSOR_NAME: SqlState = SqlState(Inner::E34000); /// 34000 - pub const UNDEFINED_CURSOR: SqlState = SqlState(Cow::Borrowed("34000")); + pub const UNDEFINED_CURSOR: SqlState = SqlState(Inner::E34000); /// 38000 - pub const EXTERNAL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("38000")); + pub const EXTERNAL_ROUTINE_EXCEPTION: SqlState = SqlState(Inner::E38000); /// 38001 - pub const E_R_E_CONTAINING_SQL_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38001")); + pub const E_R_E_CONTAINING_SQL_NOT_PERMITTED: SqlState = SqlState(Inner::E38001); /// 38002 - pub const E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38002")); + pub const E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Inner::E38002); /// 38003 - pub const E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Cow::Borrowed("38003")); + pub const E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Inner::E38003); /// 38004 - pub const E_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38004")); + pub const E_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Inner::E38004); /// 39000 - pub const EXTERNAL_ROUTINE_INVOCATION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("39000")); + pub const EXTERNAL_ROUTINE_INVOCATION_EXCEPTION: SqlState = SqlState(Inner::E39000); /// 39001 - pub const E_R_I_E_INVALID_SQLSTATE_RETURNED: SqlState = SqlState(Cow::Borrowed("39001")); + pub const E_R_I_E_INVALID_SQLSTATE_RETURNED: SqlState = 
SqlState(Inner::E39001); /// 39004 - pub const E_R_I_E_NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Cow::Borrowed("39004")); + pub const E_R_I_E_NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Inner::E39004); /// 39P01 - pub const E_R_I_E_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P01")); + pub const E_R_I_E_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Inner::E39P01); /// 39P02 - pub const E_R_I_E_SRF_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P02")); + pub const E_R_I_E_SRF_PROTOCOL_VIOLATED: SqlState = SqlState(Inner::E39P02); /// 39P03 - pub const E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P03")); + pub const E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Inner::E39P03); /// 3B000 - pub const SAVEPOINT_EXCEPTION: SqlState = SqlState(Cow::Borrowed("3B000")); + pub const SAVEPOINT_EXCEPTION: SqlState = SqlState(Inner::E3B000); /// 3B001 - pub const S_E_INVALID_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("3B001")); + pub const S_E_INVALID_SPECIFICATION: SqlState = SqlState(Inner::E3B001); /// 3D000 - pub const INVALID_CATALOG_NAME: SqlState = SqlState(Cow::Borrowed("3D000")); + pub const INVALID_CATALOG_NAME: SqlState = SqlState(Inner::E3D000); /// 3D000 - pub const UNDEFINED_DATABASE: SqlState = SqlState(Cow::Borrowed("3D000")); + pub const UNDEFINED_DATABASE: SqlState = SqlState(Inner::E3D000); /// 3F000 - pub const INVALID_SCHEMA_NAME: SqlState = SqlState(Cow::Borrowed("3F000")); + pub const INVALID_SCHEMA_NAME: SqlState = SqlState(Inner::E3F000); /// 3F000 - pub const UNDEFINED_SCHEMA: SqlState = SqlState(Cow::Borrowed("3F000")); + pub const UNDEFINED_SCHEMA: SqlState = SqlState(Inner::E3F000); /// 40000 - pub const TRANSACTION_ROLLBACK: SqlState = SqlState(Cow::Borrowed("40000")); + pub const TRANSACTION_ROLLBACK: SqlState = SqlState(Inner::E40000); /// 40002 - pub const T_R_INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Cow::Borrowed("40002")); + pub const T_R_INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Inner::E40002); /// 40001 - pub const T_R_SERIALIZATION_FAILURE: SqlState = SqlState(Cow::Borrowed("40001")); + pub const T_R_SERIALIZATION_FAILURE: SqlState = SqlState(Inner::E40001); /// 40003 - pub const T_R_STATEMENT_COMPLETION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("40003")); + pub const T_R_STATEMENT_COMPLETION_UNKNOWN: SqlState = SqlState(Inner::E40003); /// 40P01 - pub const T_R_DEADLOCK_DETECTED: SqlState = SqlState(Cow::Borrowed("40P01")); + pub const T_R_DEADLOCK_DETECTED: SqlState = SqlState(Inner::E40P01); /// 42000 - pub const SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION: SqlState = SqlState(Cow::Borrowed("42000")); + pub const SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION: SqlState = SqlState(Inner::E42000); /// 42601 - pub const SYNTAX_ERROR: SqlState = SqlState(Cow::Borrowed("42601")); + pub const SYNTAX_ERROR: SqlState = SqlState(Inner::E42601); /// 42501 - pub const INSUFFICIENT_PRIVILEGE: SqlState = SqlState(Cow::Borrowed("42501")); + pub const INSUFFICIENT_PRIVILEGE: SqlState = SqlState(Inner::E42501); /// 42846 - pub const CANNOT_COERCE: SqlState = SqlState(Cow::Borrowed("42846")); + pub const CANNOT_COERCE: SqlState = SqlState(Inner::E42846); /// 42803 - pub const GROUPING_ERROR: SqlState = SqlState(Cow::Borrowed("42803")); + pub const GROUPING_ERROR: SqlState = SqlState(Inner::E42803); /// 42P20 - pub const WINDOWING_ERROR: SqlState = SqlState(Cow::Borrowed("42P20")); + pub const WINDOWING_ERROR: SqlState = SqlState(Inner::E42P20); /// 42P19 - pub const INVALID_RECURSION: 
SqlState = SqlState(Cow::Borrowed("42P19")); + pub const INVALID_RECURSION: SqlState = SqlState(Inner::E42P19); /// 42830 - pub const INVALID_FOREIGN_KEY: SqlState = SqlState(Cow::Borrowed("42830")); + pub const INVALID_FOREIGN_KEY: SqlState = SqlState(Inner::E42830); /// 42602 - pub const INVALID_NAME: SqlState = SqlState(Cow::Borrowed("42602")); + pub const INVALID_NAME: SqlState = SqlState(Inner::E42602); /// 42622 - pub const NAME_TOO_LONG: SqlState = SqlState(Cow::Borrowed("42622")); + pub const NAME_TOO_LONG: SqlState = SqlState(Inner::E42622); /// 42939 - pub const RESERVED_NAME: SqlState = SqlState(Cow::Borrowed("42939")); + pub const RESERVED_NAME: SqlState = SqlState(Inner::E42939); /// 42804 - pub const DATATYPE_MISMATCH: SqlState = SqlState(Cow::Borrowed("42804")); + pub const DATATYPE_MISMATCH: SqlState = SqlState(Inner::E42804); /// 42P18 - pub const INDETERMINATE_DATATYPE: SqlState = SqlState(Cow::Borrowed("42P18")); + pub const INDETERMINATE_DATATYPE: SqlState = SqlState(Inner::E42P18); /// 42P21 - pub const COLLATION_MISMATCH: SqlState = SqlState(Cow::Borrowed("42P21")); + pub const COLLATION_MISMATCH: SqlState = SqlState(Inner::E42P21); /// 42P22 - pub const INDETERMINATE_COLLATION: SqlState = SqlState(Cow::Borrowed("42P22")); + pub const INDETERMINATE_COLLATION: SqlState = SqlState(Inner::E42P22); /// 42809 - pub const WRONG_OBJECT_TYPE: SqlState = SqlState(Cow::Borrowed("42809")); + pub const WRONG_OBJECT_TYPE: SqlState = SqlState(Inner::E42809); /// 428C9 - pub const GENERATED_ALWAYS: SqlState = SqlState(Cow::Borrowed("428C9")); + pub const GENERATED_ALWAYS: SqlState = SqlState(Inner::E428C9); /// 42703 - pub const UNDEFINED_COLUMN: SqlState = SqlState(Cow::Borrowed("42703")); + pub const UNDEFINED_COLUMN: SqlState = SqlState(Inner::E42703); /// 42883 - pub const UNDEFINED_FUNCTION: SqlState = SqlState(Cow::Borrowed("42883")); + pub const UNDEFINED_FUNCTION: SqlState = SqlState(Inner::E42883); /// 42P01 - pub const UNDEFINED_TABLE: SqlState = SqlState(Cow::Borrowed("42P01")); + pub const UNDEFINED_TABLE: SqlState = SqlState(Inner::E42P01); /// 42P02 - pub const UNDEFINED_PARAMETER: SqlState = SqlState(Cow::Borrowed("42P02")); + pub const UNDEFINED_PARAMETER: SqlState = SqlState(Inner::E42P02); /// 42704 - pub const UNDEFINED_OBJECT: SqlState = SqlState(Cow::Borrowed("42704")); + pub const UNDEFINED_OBJECT: SqlState = SqlState(Inner::E42704); /// 42701 - pub const DUPLICATE_COLUMN: SqlState = SqlState(Cow::Borrowed("42701")); + pub const DUPLICATE_COLUMN: SqlState = SqlState(Inner::E42701); /// 42P03 - pub const DUPLICATE_CURSOR: SqlState = SqlState(Cow::Borrowed("42P03")); + pub const DUPLICATE_CURSOR: SqlState = SqlState(Inner::E42P03); /// 42P04 - pub const DUPLICATE_DATABASE: SqlState = SqlState(Cow::Borrowed("42P04")); + pub const DUPLICATE_DATABASE: SqlState = SqlState(Inner::E42P04); /// 42723 - pub const DUPLICATE_FUNCTION: SqlState = SqlState(Cow::Borrowed("42723")); + pub const DUPLICATE_FUNCTION: SqlState = SqlState(Inner::E42723); /// 42P05 - pub const DUPLICATE_PSTATEMENT: SqlState = SqlState(Cow::Borrowed("42P05")); + pub const DUPLICATE_PSTATEMENT: SqlState = SqlState(Inner::E42P05); /// 42P06 - pub const DUPLICATE_SCHEMA: SqlState = SqlState(Cow::Borrowed("42P06")); + pub const DUPLICATE_SCHEMA: SqlState = SqlState(Inner::E42P06); /// 42P07 - pub const DUPLICATE_TABLE: SqlState = SqlState(Cow::Borrowed("42P07")); + pub const DUPLICATE_TABLE: SqlState = SqlState(Inner::E42P07); /// 42712 - pub const DUPLICATE_ALIAS: SqlState = 
SqlState(Cow::Borrowed("42712")); + pub const DUPLICATE_ALIAS: SqlState = SqlState(Inner::E42712); /// 42710 - pub const DUPLICATE_OBJECT: SqlState = SqlState(Cow::Borrowed("42710")); + pub const DUPLICATE_OBJECT: SqlState = SqlState(Inner::E42710); /// 42702 - pub const AMBIGUOUS_COLUMN: SqlState = SqlState(Cow::Borrowed("42702")); + pub const AMBIGUOUS_COLUMN: SqlState = SqlState(Inner::E42702); /// 42725 - pub const AMBIGUOUS_FUNCTION: SqlState = SqlState(Cow::Borrowed("42725")); + pub const AMBIGUOUS_FUNCTION: SqlState = SqlState(Inner::E42725); /// 42P08 - pub const AMBIGUOUS_PARAMETER: SqlState = SqlState(Cow::Borrowed("42P08")); + pub const AMBIGUOUS_PARAMETER: SqlState = SqlState(Inner::E42P08); /// 42P09 - pub const AMBIGUOUS_ALIAS: SqlState = SqlState(Cow::Borrowed("42P09")); + pub const AMBIGUOUS_ALIAS: SqlState = SqlState(Inner::E42P09); /// 42P10 - pub const INVALID_COLUMN_REFERENCE: SqlState = SqlState(Cow::Borrowed("42P10")); + pub const INVALID_COLUMN_REFERENCE: SqlState = SqlState(Inner::E42P10); /// 42611 - pub const INVALID_COLUMN_DEFINITION: SqlState = SqlState(Cow::Borrowed("42611")); + pub const INVALID_COLUMN_DEFINITION: SqlState = SqlState(Inner::E42611); /// 42P11 - pub const INVALID_CURSOR_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P11")); + pub const INVALID_CURSOR_DEFINITION: SqlState = SqlState(Inner::E42P11); /// 42P12 - pub const INVALID_DATABASE_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P12")); + pub const INVALID_DATABASE_DEFINITION: SqlState = SqlState(Inner::E42P12); /// 42P13 - pub const INVALID_FUNCTION_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P13")); + pub const INVALID_FUNCTION_DEFINITION: SqlState = SqlState(Inner::E42P13); /// 42P14 - pub const INVALID_PSTATEMENT_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P14")); + pub const INVALID_PSTATEMENT_DEFINITION: SqlState = SqlState(Inner::E42P14); /// 42P15 - pub const INVALID_SCHEMA_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P15")); + pub const INVALID_SCHEMA_DEFINITION: SqlState = SqlState(Inner::E42P15); /// 42P16 - pub const INVALID_TABLE_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P16")); + pub const INVALID_TABLE_DEFINITION: SqlState = SqlState(Inner::E42P16); /// 42P17 - pub const INVALID_OBJECT_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P17")); + pub const INVALID_OBJECT_DEFINITION: SqlState = SqlState(Inner::E42P17); /// 44000 - pub const WITH_CHECK_OPTION_VIOLATION: SqlState = SqlState(Cow::Borrowed("44000")); + pub const WITH_CHECK_OPTION_VIOLATION: SqlState = SqlState(Inner::E44000); /// 53000 - pub const INSUFFICIENT_RESOURCES: SqlState = SqlState(Cow::Borrowed("53000")); + pub const INSUFFICIENT_RESOURCES: SqlState = SqlState(Inner::E53000); /// 53100 - pub const DISK_FULL: SqlState = SqlState(Cow::Borrowed("53100")); + pub const DISK_FULL: SqlState = SqlState(Inner::E53100); /// 53200 - pub const OUT_OF_MEMORY: SqlState = SqlState(Cow::Borrowed("53200")); + pub const OUT_OF_MEMORY: SqlState = SqlState(Inner::E53200); /// 53300 - pub const TOO_MANY_CONNECTIONS: SqlState = SqlState(Cow::Borrowed("53300")); + pub const TOO_MANY_CONNECTIONS: SqlState = SqlState(Inner::E53300); /// 53400 - pub const CONFIGURATION_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("53400")); + pub const CONFIGURATION_LIMIT_EXCEEDED: SqlState = SqlState(Inner::E53400); /// 54000 - pub const PROGRAM_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("54000")); + pub const PROGRAM_LIMIT_EXCEEDED: SqlState = SqlState(Inner::E54000); /// 54001 - pub const STATEMENT_TOO_COMPLEX: 
SqlState = SqlState(Cow::Borrowed("54001")); + pub const STATEMENT_TOO_COMPLEX: SqlState = SqlState(Inner::E54001); /// 54011 - pub const TOO_MANY_COLUMNS: SqlState = SqlState(Cow::Borrowed("54011")); + pub const TOO_MANY_COLUMNS: SqlState = SqlState(Inner::E54011); /// 54023 - pub const TOO_MANY_ARGUMENTS: SqlState = SqlState(Cow::Borrowed("54023")); + pub const TOO_MANY_ARGUMENTS: SqlState = SqlState(Inner::E54023); /// 55000 - pub const OBJECT_NOT_IN_PREREQUISITE_STATE: SqlState = SqlState(Cow::Borrowed("55000")); + pub const OBJECT_NOT_IN_PREREQUISITE_STATE: SqlState = SqlState(Inner::E55000); /// 55006 - pub const OBJECT_IN_USE: SqlState = SqlState(Cow::Borrowed("55006")); + pub const OBJECT_IN_USE: SqlState = SqlState(Inner::E55006); /// 55P02 - pub const CANT_CHANGE_RUNTIME_PARAM: SqlState = SqlState(Cow::Borrowed("55P02")); + pub const CANT_CHANGE_RUNTIME_PARAM: SqlState = SqlState(Inner::E55P02); /// 55P03 - pub const LOCK_NOT_AVAILABLE: SqlState = SqlState(Cow::Borrowed("55P03")); + pub const LOCK_NOT_AVAILABLE: SqlState = SqlState(Inner::E55P03); /// 55P04 - pub const UNSAFE_NEW_ENUM_VALUE_USAGE: SqlState = SqlState(Cow::Borrowed("55P04")); + pub const UNSAFE_NEW_ENUM_VALUE_USAGE: SqlState = SqlState(Inner::E55P04); /// 57000 - pub const OPERATOR_INTERVENTION: SqlState = SqlState(Cow::Borrowed("57000")); + pub const OPERATOR_INTERVENTION: SqlState = SqlState(Inner::E57000); /// 57014 - pub const QUERY_CANCELED: SqlState = SqlState(Cow::Borrowed("57014")); + pub const QUERY_CANCELED: SqlState = SqlState(Inner::E57014); /// 57P01 - pub const ADMIN_SHUTDOWN: SqlState = SqlState(Cow::Borrowed("57P01")); + pub const ADMIN_SHUTDOWN: SqlState = SqlState(Inner::E57P01); /// 57P02 - pub const CRASH_SHUTDOWN: SqlState = SqlState(Cow::Borrowed("57P02")); + pub const CRASH_SHUTDOWN: SqlState = SqlState(Inner::E57P02); /// 57P03 - pub const CANNOT_CONNECT_NOW: SqlState = SqlState(Cow::Borrowed("57P03")); + pub const CANNOT_CONNECT_NOW: SqlState = SqlState(Inner::E57P03); /// 57P04 - pub const DATABASE_DROPPED: SqlState = SqlState(Cow::Borrowed("57P04")); + pub const DATABASE_DROPPED: SqlState = SqlState(Inner::E57P04); /// 58000 - pub const SYSTEM_ERROR: SqlState = SqlState(Cow::Borrowed("58000")); + pub const SYSTEM_ERROR: SqlState = SqlState(Inner::E58000); /// 58030 - pub const IO_ERROR: SqlState = SqlState(Cow::Borrowed("58030")); + pub const IO_ERROR: SqlState = SqlState(Inner::E58030); /// 58P01 - pub const UNDEFINED_FILE: SqlState = SqlState(Cow::Borrowed("58P01")); + pub const UNDEFINED_FILE: SqlState = SqlState(Inner::E58P01); /// 58P02 - pub const DUPLICATE_FILE: SqlState = SqlState(Cow::Borrowed("58P02")); + pub const DUPLICATE_FILE: SqlState = SqlState(Inner::E58P02); /// 72000 - pub const SNAPSHOT_TOO_OLD: SqlState = SqlState(Cow::Borrowed("72000")); + pub const SNAPSHOT_TOO_OLD: SqlState = SqlState(Inner::E72000); /// F0000 - pub const CONFIG_FILE_ERROR: SqlState = SqlState(Cow::Borrowed("F0000")); + pub const CONFIG_FILE_ERROR: SqlState = SqlState(Inner::EF0000); /// F0001 - pub const LOCK_FILE_EXISTS: SqlState = SqlState(Cow::Borrowed("F0001")); + pub const LOCK_FILE_EXISTS: SqlState = SqlState(Inner::EF0001); /// HV000 - pub const FDW_ERROR: SqlState = SqlState(Cow::Borrowed("HV000")); + pub const FDW_ERROR: SqlState = SqlState(Inner::EHV000); /// HV005 - pub const FDW_COLUMN_NAME_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV005")); + pub const FDW_COLUMN_NAME_NOT_FOUND: SqlState = SqlState(Inner::EHV005); /// HV002 - pub const FDW_DYNAMIC_PARAMETER_VALUE_NEEDED: SqlState 
= SqlState(Cow::Borrowed("HV002")); + pub const FDW_DYNAMIC_PARAMETER_VALUE_NEEDED: SqlState = SqlState(Inner::EHV002); /// HV010 - pub const FDW_FUNCTION_SEQUENCE_ERROR: SqlState = SqlState(Cow::Borrowed("HV010")); + pub const FDW_FUNCTION_SEQUENCE_ERROR: SqlState = SqlState(Inner::EHV010); /// HV021 - pub const FDW_INCONSISTENT_DESCRIPTOR_INFORMATION: SqlState = SqlState(Cow::Borrowed("HV021")); + pub const FDW_INCONSISTENT_DESCRIPTOR_INFORMATION: SqlState = SqlState(Inner::EHV021); /// HV024 - pub const FDW_INVALID_ATTRIBUTE_VALUE: SqlState = SqlState(Cow::Borrowed("HV024")); + pub const FDW_INVALID_ATTRIBUTE_VALUE: SqlState = SqlState(Inner::EHV024); /// HV007 - pub const FDW_INVALID_COLUMN_NAME: SqlState = SqlState(Cow::Borrowed("HV007")); + pub const FDW_INVALID_COLUMN_NAME: SqlState = SqlState(Inner::EHV007); /// HV008 - pub const FDW_INVALID_COLUMN_NUMBER: SqlState = SqlState(Cow::Borrowed("HV008")); + pub const FDW_INVALID_COLUMN_NUMBER: SqlState = SqlState(Inner::EHV008); /// HV004 - pub const FDW_INVALID_DATA_TYPE: SqlState = SqlState(Cow::Borrowed("HV004")); + pub const FDW_INVALID_DATA_TYPE: SqlState = SqlState(Inner::EHV004); /// HV006 - pub const FDW_INVALID_DATA_TYPE_DESCRIPTORS: SqlState = SqlState(Cow::Borrowed("HV006")); + pub const FDW_INVALID_DATA_TYPE_DESCRIPTORS: SqlState = SqlState(Inner::EHV006); /// HV091 - pub const FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER: SqlState = SqlState(Cow::Borrowed("HV091")); + pub const FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER: SqlState = SqlState(Inner::EHV091); /// HV00B - pub const FDW_INVALID_HANDLE: SqlState = SqlState(Cow::Borrowed("HV00B")); + pub const FDW_INVALID_HANDLE: SqlState = SqlState(Inner::EHV00B); /// HV00C - pub const FDW_INVALID_OPTION_INDEX: SqlState = SqlState(Cow::Borrowed("HV00C")); + pub const FDW_INVALID_OPTION_INDEX: SqlState = SqlState(Inner::EHV00C); /// HV00D - pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Cow::Borrowed("HV00D")); + pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Inner::EHV00D); /// HV090 - pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = - SqlState(Cow::Borrowed("HV090")); + pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = SqlState(Inner::EHV090); /// HV00A - pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Cow::Borrowed("HV00A")); + pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Inner::EHV00A); /// HV009 - pub const FDW_INVALID_USE_OF_NULL_POINTER: SqlState = SqlState(Cow::Borrowed("HV009")); + pub const FDW_INVALID_USE_OF_NULL_POINTER: SqlState = SqlState(Inner::EHV009); /// HV014 - pub const FDW_TOO_MANY_HANDLES: SqlState = SqlState(Cow::Borrowed("HV014")); + pub const FDW_TOO_MANY_HANDLES: SqlState = SqlState(Inner::EHV014); /// HV001 - pub const FDW_OUT_OF_MEMORY: SqlState = SqlState(Cow::Borrowed("HV001")); + pub const FDW_OUT_OF_MEMORY: SqlState = SqlState(Inner::EHV001); /// HV00P - pub const FDW_NO_SCHEMAS: SqlState = SqlState(Cow::Borrowed("HV00P")); + pub const FDW_NO_SCHEMAS: SqlState = SqlState(Inner::EHV00P); /// HV00J - pub const FDW_OPTION_NAME_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00J")); + pub const FDW_OPTION_NAME_NOT_FOUND: SqlState = SqlState(Inner::EHV00J); /// HV00K - pub const FDW_REPLY_HANDLE: SqlState = SqlState(Cow::Borrowed("HV00K")); + pub const FDW_REPLY_HANDLE: SqlState = SqlState(Inner::EHV00K); /// HV00Q - pub const FDW_SCHEMA_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00Q")); + pub const FDW_SCHEMA_NOT_FOUND: SqlState = SqlState(Inner::EHV00Q); /// HV00R - pub const 
FDW_TABLE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00R")); + pub const FDW_TABLE_NOT_FOUND: SqlState = SqlState(Inner::EHV00R); /// HV00L - pub const FDW_UNABLE_TO_CREATE_EXECUTION: SqlState = SqlState(Cow::Borrowed("HV00L")); + pub const FDW_UNABLE_TO_CREATE_EXECUTION: SqlState = SqlState(Inner::EHV00L); /// HV00M - pub const FDW_UNABLE_TO_CREATE_REPLY: SqlState = SqlState(Cow::Borrowed("HV00M")); + pub const FDW_UNABLE_TO_CREATE_REPLY: SqlState = SqlState(Inner::EHV00M); /// HV00N - pub const FDW_UNABLE_TO_ESTABLISH_CONNECTION: SqlState = SqlState(Cow::Borrowed("HV00N")); + pub const FDW_UNABLE_TO_ESTABLISH_CONNECTION: SqlState = SqlState(Inner::EHV00N); /// P0000 - pub const PLPGSQL_ERROR: SqlState = SqlState(Cow::Borrowed("P0000")); + pub const PLPGSQL_ERROR: SqlState = SqlState(Inner::EP0000); /// P0001 - pub const RAISE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("P0001")); + pub const RAISE_EXCEPTION: SqlState = SqlState(Inner::EP0001); /// P0002 - pub const NO_DATA_FOUND: SqlState = SqlState(Cow::Borrowed("P0002")); + pub const NO_DATA_FOUND: SqlState = SqlState(Inner::EP0002); /// P0003 - pub const TOO_MANY_ROWS: SqlState = SqlState(Cow::Borrowed("P0003")); + pub const TOO_MANY_ROWS: SqlState = SqlState(Inner::EP0003); /// P0004 - pub const ASSERT_FAILURE: SqlState = SqlState(Cow::Borrowed("P0004")); + pub const ASSERT_FAILURE: SqlState = SqlState(Inner::EP0004); /// XX000 - pub const INTERNAL_ERROR: SqlState = SqlState(Cow::Borrowed("XX000")); + pub const INTERNAL_ERROR: SqlState = SqlState(Inner::EXX000); /// XX001 - pub const DATA_CORRUPTED: SqlState = SqlState(Cow::Borrowed("XX001")); + pub const DATA_CORRUPTED: SqlState = SqlState(Inner::EXX001); /// XX002 - pub const INDEX_CORRUPTED: SqlState = SqlState(Cow::Borrowed("XX002")); + pub const INDEX_CORRUPTED: SqlState = SqlState(Inner::EXX002); } + +#[derive(PartialEq, Eq, Clone, Debug)] +enum Inner { + E00000, + E01000, + E0100C, + E01008, + E01003, + E01007, + E01006, + E01004, + E01P01, + E02000, + E02001, + E03000, + E08000, + E08003, + E08006, + E08001, + E08004, + E08007, + E08P01, + E09000, + E0A000, + E0B000, + E0F000, + E0F001, + E0L000, + E0LP01, + E0P000, + E0Z000, + E0Z002, + E20000, + E21000, + E22000, + E2202E, + E22021, + E22008, + E22012, + E22005, + E2200B, + E22022, + E22015, + E2201E, + E22014, + E22016, + E2201F, + E2201G, + E22018, + E22007, + E22019, + E2200D, + E22025, + E22P06, + E22010, + E22023, + E22013, + E2201B, + E2201W, + E2201X, + E2202H, + E2202G, + E22009, + E2200C, + E2200G, + E22004, + E22002, + E22003, + E2200H, + E22026, + E22001, + E22011, + E22027, + E22024, + E2200F, + E22P01, + E22P02, + E22P03, + E22P04, + E22P05, + E2200L, + E2200M, + E2200N, + E2200S, + E2200T, + E22030, + E22031, + E22032, + E22033, + E22034, + E22035, + E22036, + E22037, + E22038, + E22039, + E2203A, + E2203B, + E2203C, + E2203D, + E2203E, + E2203F, + E23000, + E23001, + E23502, + E23503, + E23505, + E23514, + E23P01, + E24000, + E25000, + E25001, + E25002, + E25008, + E25003, + E25004, + E25005, + E25006, + E25007, + E25P01, + E25P02, + E25P03, + E26000, + E27000, + E28000, + E28P01, + E2B000, + E2BP01, + E2D000, + E2F000, + E2F005, + E2F002, + E2F003, + E2F004, + E34000, + E38000, + E38001, + E38002, + E38003, + E38004, + E39000, + E39001, + E39004, + E39P01, + E39P02, + E39P03, + E3B000, + E3B001, + E3D000, + E3F000, + E40000, + E40002, + E40001, + E40003, + E40P01, + E42000, + E42601, + E42501, + E42846, + E42803, + E42P20, + E42P19, + E42830, + E42602, + E42622, + E42939, + E42804, + E42P18, + E42P21, 
+ E42P22, + E42809, + E428C9, + E42703, + E42883, + E42P01, + E42P02, + E42704, + E42701, + E42P03, + E42P04, + E42723, + E42P05, + E42P06, + E42P07, + E42712, + E42710, + E42702, + E42725, + E42P08, + E42P09, + E42P10, + E42611, + E42P11, + E42P12, + E42P13, + E42P14, + E42P15, + E42P16, + E42P17, + E44000, + E53000, + E53100, + E53200, + E53300, + E53400, + E54000, + E54001, + E54011, + E54023, + E55000, + E55006, + E55P02, + E55P03, + E55P04, + E57000, + E57014, + E57P01, + E57P02, + E57P03, + E57P04, + E58000, + E58030, + E58P01, + E58P02, + E72000, + EF0000, + EF0001, + EHV000, + EHV005, + EHV002, + EHV010, + EHV021, + EHV024, + EHV007, + EHV008, + EHV004, + EHV006, + EHV091, + EHV00B, + EHV00C, + EHV00D, + EHV090, + EHV00A, + EHV009, + EHV014, + EHV001, + EHV00P, + EHV00J, + EHV00K, + EHV00Q, + EHV00R, + EHV00L, + EHV00M, + EHV00N, + EP0000, + EP0001, + EP0002, + EP0003, + EP0004, + EXX000, + EXX001, + EXX002, + Other(Box), +} + #[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { From 91ce9cdeec624511f79b39debfcf12bdac62178e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 24 Apr 2021 10:34:51 -0400 Subject: [PATCH 504/819] fix clippy --- codegen/src/sqlstate.rs | 1 + tokio-postgres/src/error/sqlstate.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index ea3045654..d21b92eec 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -113,6 +113,7 @@ fn make_inner(codes: &LinkedHashMap>, file: &mut BufWriter Date: Sun, 25 Apr 2021 10:52:25 -0400 Subject: [PATCH 505/819] Release v0.7.2 --- tokio-postgres/CHANGELOG.md | 8 +++++++- tokio-postgres/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 7cc6c7071..3a7aa2ae7 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,6 +1,12 @@ # Change Log -## v0.7.1 - 2020-04-03 +## v0.7.2 - 2021-04-25 + +### Fixed + +* `SqlState` constants can now be used in `match` patterns. 
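The "Fixed" entry above is the user-visible effect of the `Inner` enum introduced in the previous patch; a minimal sketch of the pattern it enables, assuming the public `tokio_postgres::Error::code()` and `tokio_postgres::error::SqlState` API:

use tokio_postgres::error::SqlState;

// Returns true when a query failed because of a unique-constraint violation
// (SQLSTATE 23505). With the enum-backed constants this compiles as a plain
// `match` pattern instead of requiring a manual comparison of code strings.
fn is_unique_violation(err: &tokio_postgres::Error) -> bool {
    match err.code() {
        Some(&SqlState::UNIQUE_VIOLATION) => true,
        _ => false,
    }
}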
+ +## v0.7.1 - 2021-04-03 ### Added diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index b1d093d4b..780c31963 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.1" +version = "0.7.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 20f0d76459a20b934c9863c7638bd94f4fee506f Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Thu, 29 Apr 2021 20:58:01 +0000 Subject: [PATCH 506/819] Upgrade to GitHub-native Dependabot --- .github/dependabot.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..a64a91b02 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,12 @@ +version: 2 +updates: +- package-ecosystem: cargo + directory: "/" + schedule: + interval: daily + time: "13:00" + open-pull-requests-limit: 10 + ignore: + - dependency-name: socket2 + versions: + - 0.4.0 From 0f0de8c34d3858e188079060b75dd818ae469115 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 29 Apr 2021 17:00:22 -0400 Subject: [PATCH 507/819] Update dependabot.yml --- .github/dependabot.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a64a91b02..1332f8eb5 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,7 +6,3 @@ updates: interval: daily time: "13:00" open-pull-requests-limit: 10 - ignore: - - dependency-name: socket2 - versions: - - 0.4.0 From 83616fadb5c0b88b34d1f83478ac6d46546d2b31 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Apr 2021 21:02:13 +0000 Subject: [PATCH 508/819] Update hmac requirement from 0.10 to 0.11 Updates the requirements on [hmac](https://github.com/RustCrypto/MACs) to permit the latest version. 
- [Release notes](https://github.com/RustCrypto/MACs/releases) - [Commits](https://github.com/RustCrypto/MACs/compare/hmac-v0.10.0...hmac-v0.11.0) Signed-off-by: dependabot[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 4fd288697..d4ae8c301 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -13,7 +13,7 @@ base64 = "0.13" byteorder = "1.0" bytes = "1.0" fallible-iterator = "0.2" -hmac = "0.10" +hmac = "0.11" md-5 = "0.9" memchr = "2.0" rand = "0.8" From 4e8b9078a194c6b65bdff6882a76b8361a53a02e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 29 Apr 2021 17:07:24 -0400 Subject: [PATCH 509/819] fix build --- postgres-protocol/src/authentication/sasl.rs | 17 +++++++++-------- postgres-protocol/src/password/mod.rs | 8 ++++---- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index 7c0d1754f..a3704ce16 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -33,7 +33,8 @@ fn normalize(pass: &[u8]) -> Vec { } pub(crate) fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { - let mut hmac = Hmac::::new_varkey(str).expect("HMAC is able to accept all key sizes"); + let mut hmac = + Hmac::::new_from_slice(str).expect("HMAC is able to accept all key sizes"); hmac.update(salt); hmac.update(&[0, 0, 0, 1]); let mut prev = hmac.finalize().into_bytes(); @@ -41,7 +42,7 @@ pub(crate) fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { let mut hi = prev; for _ in 1..i { - let mut hmac = Hmac::::new_varkey(str).expect("already checked above"); + let mut hmac = Hmac::::new_from_slice(str).expect("already checked above"); hmac.update(&prev); prev = hmac.finalize().into_bytes(); @@ -195,7 +196,7 @@ impl ScramSha256 { let salted_password = hi(&password, &salt, parsed.iteration_count); - let mut hmac = Hmac::::new_varkey(&salted_password) + let mut hmac = Hmac::::new_from_slice(&salted_password) .expect("HMAC is able to accept all key sizes"); hmac.update(b"Client Key"); let client_key = hmac.finalize().into_bytes(); @@ -214,8 +215,8 @@ impl ScramSha256 { let auth_message = format!("n=,r={},{},{}", client_nonce, message, self.message); - let mut hmac = - Hmac::::new_varkey(&stored_key).expect("HMAC is able to accept all key sizes"); + let mut hmac = Hmac::::new_from_slice(&stored_key) + .expect("HMAC is able to accept all key sizes"); hmac.update(auth_message.as_bytes()); let client_signature = hmac.finalize().into_bytes(); @@ -266,13 +267,13 @@ impl ScramSha256 { Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; - let mut hmac = Hmac::::new_varkey(&salted_password) + let mut hmac = Hmac::::new_from_slice(&salted_password) .expect("HMAC is able to accept all key sizes"); hmac.update(b"Server Key"); let server_key = hmac.finalize().into_bytes(); - let mut hmac = - Hmac::::new_varkey(&server_key).expect("HMAC is able to accept all key sizes"); + let mut hmac = Hmac::::new_from_slice(&server_key) + .expect("HMAC is able to accept all key sizes"); hmac.update(auth_message.as_bytes()); hmac.verify(&verifier) .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "SCRAM verification error")) diff --git a/postgres-protocol/src/password/mod.rs b/postgres-protocol/src/password/mod.rs index ccb95c79b..1b32ae8f8 100644 --- a/postgres-protocol/src/password/mod.rs +++ 
b/postgres-protocol/src/password/mod.rs @@ -61,8 +61,8 @@ pub(crate) fn scram_sha_256_salt(password: &[u8], salt: [u8; SCRAM_DEFAULT_SALT_ let salted_password = sasl::hi(&prepared, &salt, SCRAM_DEFAULT_ITERATIONS); // client key - let mut hmac = - Hmac::::new_varkey(&salted_password).expect("HMAC is able to accept all key sizes"); + let mut hmac = Hmac::::new_from_slice(&salted_password) + .expect("HMAC is able to accept all key sizes"); hmac.update(b"Client Key"); let client_key = hmac.finalize().into_bytes(); @@ -72,8 +72,8 @@ pub(crate) fn scram_sha_256_salt(password: &[u8], salt: [u8; SCRAM_DEFAULT_SALT_ let stored_key = hash.finalize_fixed(); // server key - let mut hmac = - Hmac::::new_varkey(&salted_password).expect("HMAC is able to accept all key sizes"); + let mut hmac = Hmac::::new_from_slice(&salted_password) + .expect("HMAC is able to accept all key sizes"); hmac.update(b"Server Key"); let server_key = hmac.finalize().into_bytes(); From a84a45d88ed157c8756d5e54b2b13467721e5d12 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 May 2021 14:59:07 +0100 Subject: [PATCH 510/819] Fix deadlock when pipelining statements. When executing statements in parallel there is a race where we prepare the type info queries multiple times, and so insert into the type info caches multiple times. This resulted in any existing cached `Statement` being dropped, running its destructor, which attempts to take out the state lock that is already being held, resulting in a deadlock. Fixes #772. --- tokio-postgres/src/client.rs | 45 +++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index f19005e55..b417c1396 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -55,8 +55,17 @@ impl Responses { } struct State { + /// A cached prepared statement for basic information for a type from its + /// OID. Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_QUERY) (or its + /// fallback). typeinfo: Option, + /// A cached prepared statement for getting information for a composite type + /// from its OID. Corresponds to + /// [TYPEINFO_QUERY](prepare::TYPEINFO_COMPOSITE_QUERY). typeinfo_composite: Option, + /// A cached prepared statement for getting information for a composite type + /// from its OID. Corresponds to + /// [TYPEINFO_QUERY](prepare::TYPEINFO_COMPOSITE_QUERY) (or its fallback). typeinfo_enum: Option, types: HashMap, buf: BytesMut, @@ -86,7 +95,17 @@ impl InnerClient { } pub fn set_typeinfo(&self, statement: &Statement) { - self.state.lock().typeinfo = Some(statement.clone()); + // We only insert the statement if there isn't already a cached + // statement (this is safe as they are prepared statements for the same + // query). + // + // Note: We need to be sure that we don't drop a Statement while holding + // the state lock as its drop handling will call `with_buf`, which tries + // to take the lock. + let mut state = self.state.lock(); + if state.typeinfo.is_none() { + state.typeinfo = Some(statement.clone()); + } } pub fn typeinfo_composite(&self) -> Option { @@ -94,7 +113,17 @@ impl InnerClient { } pub fn set_typeinfo_composite(&self, statement: &Statement) { - self.state.lock().typeinfo_composite = Some(statement.clone()); + // We only insert the statement if there isn't already a cached + // statement (this is safe as they are prepared statements for the same + // query).
+ // + // Note: We need to be sure that we don't drop a Statement while holding + // the state lock as its drop handling will call `with_buf`, which tries + // to take the lock. + let mut state = self.state.lock(); + if state.typeinfo_composite.is_none() { + state.typeinfo_composite = Some(statement.clone()); + } } pub fn typeinfo_enum(&self) -> Option { @@ -102,7 +131,17 @@ impl InnerClient { } pub fn set_typeinfo_enum(&self, statement: &Statement) { - self.state.lock().typeinfo_enum = Some(statement.clone()); + // We only insert the statement if there isn't already a cached + // statement (this is safe as they are prepared statements for the same + // query). + // + // Note: We need to be sure that we don't drop a Statement while holding + // the state lock as its drop handling will call `with_buf`, which tries + // to take the lock. + let mut state = self.state.lock(); + if state.typeinfo_enum.is_none() { + state.typeinfo_enum = Some(statement.clone()); + } } pub fn type_(&self, oid: Oid) -> Option { From b7215c60d9584a8fd4245cc85ccce8aba998637d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 May 2021 15:20:36 +0100 Subject: [PATCH 511/819] Split State into two. There is no reason for the buffer and typeinfo caches to share the same lock. By splitting them it means we a) get slightly better performance, but more importantly b) it makes it harder to accidentally deadlock. --- tokio-postgres/src/client.rs | 81 +++++++++++++++++------------------- 1 file changed, 38 insertions(+), 43 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index b417c1396..4e8babf1d 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -54,26 +54,32 @@ impl Responses { } } -struct State { - /// A cached prepared statement for basic information for a type from its +/// A cache of type info and prepared statements for fetching type info +/// (corresponding to the queries in the [prepare](prepare) module). +#[derive(Default)] +struct CachedTypeInfo { + /// A statement for basic information for a type from its /// OID. Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_QUERY) (or its /// fallback). typeinfo: Option, - /// A cached prepared statement for getting information for a composite type - /// from its OID. Corresponds to - /// [TYPEINFO_QUERY](prepare::TYPEINFO_COMPOSITE_QUERY). + /// A statement for getting information for a composite type from its OID. + /// Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_COMPOSITE_QUERY). typeinfo_composite: Option, - /// A cached prepared statement for getting information for a composite type - /// from its OID. Corresponds to - /// [TYPEINFO_QUERY](prepare::TYPEINFO_COMPOSITE_QUERY) (or its fallback). + /// A statement for getting information for a composite type from its OID. + /// Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_COMPOSITE_QUERY) (or + /// its fallback). typeinfo_enum: Option, + + /// Cache of types already looked up. types: HashMap, - buf: BytesMut, } pub struct InnerClient { sender: mpsc::UnboundedSender, - state: Mutex, + cached_typeinfo: Mutex, + + /// A buffer to use when writing out postgres commands. 
+ buffer: Mutex, } impl InnerClient { @@ -91,7 +97,7 @@ impl InnerClient { } pub fn typeinfo(&self) -> Option { - self.state.lock().typeinfo.clone() + self.cached_typeinfo.lock().typeinfo.clone() } pub fn set_typeinfo(&self, statement: &Statement) { @@ -102,67 +108,61 @@ impl InnerClient { // Note: We need to be sure that we don't drop a Statement while holding // the state lock as its drop handling will call `with_buf`, which tries // to take the lock. - let mut state = self.state.lock(); - if state.typeinfo.is_none() { - state.typeinfo = Some(statement.clone()); + let mut cache = self.cached_typeinfo.lock(); + if cache.typeinfo.is_none() { + cache.typeinfo = Some(statement.clone()); } } pub fn typeinfo_composite(&self) -> Option { - self.state.lock().typeinfo_composite.clone() + self.cached_typeinfo.lock().typeinfo_composite.clone() } pub fn set_typeinfo_composite(&self, statement: &Statement) { // We only insert the statement if there isn't already a cached // statement (this is safe as they are prepared statements for the same // query). - // - // Note: We need to be sure that we don't drop a Statement while holding - // the state lock as its drop handling will call `with_buf`, which tries - // to take the lock. - let mut state = self.state.lock(); - if state.typeinfo_composite.is_none() { - state.typeinfo_composite = Some(statement.clone()); + let mut cache = self.cached_typeinfo.lock(); + if cache.typeinfo_composite.is_none() { + cache.typeinfo_composite = Some(statement.clone()); } } pub fn typeinfo_enum(&self) -> Option { - self.state.lock().typeinfo_enum.clone() + self.cached_typeinfo.lock().typeinfo_enum.clone() } pub fn set_typeinfo_enum(&self, statement: &Statement) { // We only insert the statement if there isn't already a cached // statement (this is safe as they are prepared statements for the same // query). - // - // Note: We need to be sure that we don't drop a Statement while holding - // the state lock as its drop handling will call `with_buf`, which tries - // to take the lock. - let mut state = self.state.lock(); - if state.typeinfo_enum.is_none() { - state.typeinfo_enum = Some(statement.clone()); + let mut cache = self.cached_typeinfo.lock(); + if cache.typeinfo_enum.is_none() { + cache.typeinfo_enum = Some(statement.clone()); } } pub fn type_(&self, oid: Oid) -> Option { - self.state.lock().types.get(&oid).cloned() + self.cached_typeinfo.lock().types.get(&oid).cloned() } pub fn set_type(&self, oid: Oid, type_: &Type) { - self.state.lock().types.insert(oid, type_.clone()); + self.cached_typeinfo.lock().types.insert(oid, type_.clone()); } pub fn clear_type_cache(&self) { - self.state.lock().types.clear(); + self.cached_typeinfo.lock().types.clear(); } + /// Call the given function with a buffer to be used when writing out + /// postgres commands. 
pub fn with_buf(&self, f: F) -> R where F: FnOnce(&mut BytesMut) -> R, { - let mut state = self.state.lock(); - let r = f(&mut state.buf); - state.buf.clear(); + let mut buffer = self.buffer.lock(); + let r = f(&mut buffer); + buffer.clear(); r } } @@ -199,13 +199,8 @@ impl Client { Client { inner: Arc::new(InnerClient { sender, - state: Mutex::new(State { - typeinfo: None, - typeinfo_composite: None, - typeinfo_enum: None, - types: HashMap::new(), - buf: BytesMut::new(), - }), + cached_typeinfo: Default::default(), + buffer: Default::default(), }), #[cfg(feature = "runtime")] socket_config: None, From 844a1bd145c0099ea1a31de145a6ecc8fc2a699b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sat, 15 May 2021 10:25:14 +0100 Subject: [PATCH 512/819] Revert change back to always setting the cached statement --- tokio-postgres/src/client.rs | 28 +++------------------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 4e8babf1d..4a099d941 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -101,17 +101,7 @@ impl InnerClient { } pub fn set_typeinfo(&self, statement: &Statement) { - // We only insert the statement if there isn't already a cached - // statement (this is safe as they are prepared statements for the same - // query). - // - // Note: We need to be sure that we don't drop a Statement while holding - // the state lock as its drop handling will call `with_buf`, which tries - // to take the lock. - let mut cache = self.cached_typeinfo.lock(); - if cache.typeinfo.is_none() { - cache.typeinfo = Some(statement.clone()); - } + self.cached_typeinfo.lock().typeinfo = Some(statement.clone()); } pub fn typeinfo_composite(&self) -> Option { @@ -119,13 +109,7 @@ impl InnerClient { } pub fn set_typeinfo_composite(&self, statement: &Statement) { - // We only insert the statement if there isn't already a cached - // statement (this is safe as they are prepared statements for the same - // query). - let mut cache = self.cached_typeinfo.lock(); - if cache.typeinfo_composite.is_none() { - cache.typeinfo_composite = Some(statement.clone()); - } + self.cached_typeinfo.lock().typeinfo_composite = Some(statement.clone()); } pub fn typeinfo_enum(&self) -> Option { @@ -133,13 +117,7 @@ impl InnerClient { } pub fn set_typeinfo_enum(&self, statement: &Statement) { - // We only insert the statement if there isn't already a cached - // statement (this is safe as they are prepared statements for the same - // query). 
- let mut cache = self.cached_typeinfo.lock(); - if cache.typeinfo_enum.is_none() { - cache.typeinfo_enum = Some(statement.clone()); - } + self.cached_typeinfo.lock().typeinfo_enum = Some(statement.clone()); } pub fn type_(&self, oid: Oid) -> Option { From 52de2693670ee4b1a6d571f176ea4dd44117db43 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 18 May 2021 20:47:26 -0400 Subject: [PATCH 513/819] fix clippy --- postgres-protocol/src/message/backend.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 68b5aa6e5..45e5c4074 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -450,9 +450,9 @@ impl CopyDataBody { } pub struct CopyInResponseBody { - storage: Bytes, - len: u16, format: u8, + len: u16, + storage: Bytes, } impl CopyInResponseBody { @@ -504,9 +504,9 @@ impl<'a> FallibleIterator for ColumnFormats<'a> { } pub struct CopyOutResponseBody { - storage: Bytes, - len: u16, format: u8, + len: u16, + storage: Bytes, } impl CopyOutResponseBody { From ca6d4b816221214798bc68f2b182f9fa822e115f Mon Sep 17 00:00:00 2001 From: Petros Angelatos Date: Mon, 24 May 2021 17:54:24 +0200 Subject: [PATCH 514/819] tokio-postgres: buffer sockets to avoid excessive syscalls The current implementation forwards all read requests to the operating system through the socket causing excessive system calls. The effect is magnified when the underlying Socket is wrapped around a TLS implementation. This commit changes the underlying socket to be read-buffered by default with a buffer size of 16K, following the implementation of the official client. Signed-off-by: Petros Angelatos --- postgres-native-tls/src/lib.rs | 5 +++-- postgres-openssl/src/lib.rs | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index 70e34812d..2f2e6e6ad 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -51,7 +51,7 @@ use std::future::Future; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::io::{AsyncRead, AsyncWrite, BufReader, ReadBuf}; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; @@ -115,6 +115,7 @@ where type Future = Pin, native_tls::Error>> + Send>>; fn connect(self, stream: S) -> Self::Future { + let stream = BufReader::with_capacity(8192, stream); let future = async move { let stream = self.connector.connect(&self.domain, stream).await?; @@ -126,7 +127,7 @@ where } /// The stream returned by `TlsConnector`. 
-pub struct TlsStream(tokio_native_tls::TlsStream); +pub struct TlsStream(tokio_native_tls::TlsStream>); impl AsyncRead for TlsStream where diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index dce3dff5d..f3c0b9309 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -57,7 +57,7 @@ use std::pin::Pin; #[cfg(feature = "runtime")] use std::sync::Arc; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::io::{AsyncRead, AsyncWrite, BufReader, ReadBuf}; use tokio_openssl::SslStream; use tokio_postgres::tls; #[cfg(feature = "runtime")] @@ -140,6 +140,7 @@ where type Future = Pin, Self::Error>> + Send>>; fn connect(self, stream: S) -> Self::Future { + let stream = BufReader::with_capacity(8192, stream); let future = async move { let ssl = self.ssl.into_ssl(&self.domain)?; let mut stream = SslStream::new(ssl, stream)?; @@ -182,7 +183,7 @@ impl Error for ConnectError { } /// The stream returned by `TlsConnector`. -pub struct TlsStream(SslStream); +pub struct TlsStream(SslStream>); impl AsyncRead for TlsStream where From b03ffcd043722ec74d1075514bf9ad8e061954b8 Mon Sep 17 00:00:00 2001 From: Marcin Pajkowski Date: Sat, 29 May 2021 23:43:22 +0200 Subject: [PATCH 515/819] expose SimpleQueryRow's column names --- tokio-postgres/src/row.rs | 19 +++++++++++++++++-- tokio-postgres/src/simple_query.rs | 21 +++++++++++++++++++-- tokio-postgres/tests/test/main.rs | 4 ++++ 3 files changed, 40 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 842216ad2..e3ed696c1 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -1,6 +1,7 @@ //! Rows. use crate::row::sealed::{AsName, Sealed}; +use crate::simple_query::SimpleColumn; use crate::statement::Column; use crate::types::{FromSql, Type, WrongType}; use crate::{Error, Statement}; @@ -188,16 +189,25 @@ impl Row { } } +impl AsName for SimpleColumn { + fn as_name(&self) -> &str { + self.name() + } +} + /// A row of data returned from the database by a simple query. pub struct SimpleQueryRow { - columns: Arc<[String]>, + columns: Arc<[SimpleColumn]>, body: DataRowBody, ranges: Vec>>, } impl SimpleQueryRow { #[allow(clippy::new_ret_no_self)] - pub(crate) fn new(columns: Arc<[String]>, body: DataRowBody) -> Result { + pub(crate) fn new( + columns: Arc<[SimpleColumn]>, + body: DataRowBody, + ) -> Result { let ranges = body.ranges().collect().map_err(Error::parse)?; Ok(SimpleQueryRow { columns, @@ -206,6 +216,11 @@ impl SimpleQueryRow { }) } + /// Returns information about the columns of data in the row. + pub fn columns(&self) -> &[SimpleColumn] { + &self.columns + } + /// Determines if the row contains no values. pub fn is_empty(&self) -> bool { self.len() == 0 diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 82ac35664..ade2e1d6d 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -14,6 +14,22 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; +/// Information about a column of a single query row. +pub struct SimpleColumn { + name: String, +} + +impl SimpleColumn { + pub(crate) fn new(name: String) -> SimpleColumn { + SimpleColumn { name } + } + + /// Returns the name of the column. + pub fn name(&self) -> &str { + &self.name + } +} + pub async fn simple_query(client: &InnerClient, query: &str) -> Result { debug!("executing simple query: {}", query); @@ -56,7 +72,7 @@ pin_project! 
{ /// A stream of simple query results. pub struct SimpleQueryStream { responses: Responses, - columns: Option>, + columns: Option>, #[pin] _p: PhantomPinned, } @@ -86,10 +102,11 @@ impl Stream for SimpleQueryStream { Message::RowDescription(body) => { let columns = body .fields() - .map(|f| Ok(f.name().to_string())) + .map(|f| Ok(SimpleColumn::new(f.name().to_string()))) .collect::>() .map_err(Error::parse)? .into(); + *this.columns = Some(columns); } Message::DataRow(body) => { diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index c367dbea3..c0b4bf202 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -282,6 +282,8 @@ async fn simple_query() { } match &messages[2] { SimpleQueryMessage::Row(row) => { + assert_eq!(row.columns().get(0).map(|c| c.name()), Some("id")); + assert_eq!(row.columns().get(1).map(|c| c.name()), Some("name")); assert_eq!(row.get(0), Some("1")); assert_eq!(row.get(1), Some("steven")); } @@ -289,6 +291,8 @@ async fn simple_query() { } match &messages[3] { SimpleQueryMessage::Row(row) => { + assert_eq!(row.columns().get(0).map(|c| c.name()), Some("id")); + assert_eq!(row.columns().get(1).map(|c| c.name()), Some("name")); assert_eq!(row.get(0), Some("2")); assert_eq!(row.get(1), Some("joe")); } From a8383dcc2970d5720ee8097c48c1d4c507a24eab Mon Sep 17 00:00:00 2001 From: Tim Anderson Date: Thu, 3 Jun 2021 10:54:37 +1000 Subject: [PATCH 516/819] Add support for eui48 version 1.0 --- postgres-types/Cargo.toml | 2 ++ postgres-types/src/eui48_1.rs | 27 ++++++++++++++++++++++ postgres-types/src/lib.rs | 2 ++ postgres/Cargo.toml | 1 + postgres/src/lib.rs | 3 ++- tokio-postgres/Cargo.toml | 2 ++ tokio-postgres/src/lib.rs | 3 ++- tokio-postgres/tests/test/types/eui48_1.rs | 18 +++++++++++++++ tokio-postgres/tests/test/types/mod.rs | 2 ++ 9 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 postgres-types/src/eui48_1.rs create mode 100644 tokio-postgres/tests/test/types/eui48_1.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 1d7f2cc9a..b258cee12 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -15,6 +15,7 @@ derive = ["postgres-derive"] with-bit-vec-0_6 = ["bit-vec-06"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] +with-eui48-1 = ["eui48-1"] with-geo-types-0_6 = ["geo-types-06"] with-geo-types-0_7 = ["geo-types-0_7"] with-serde_json-1 = ["serde-1", "serde_json-1"] @@ -30,6 +31,7 @@ postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-deri bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = ["clock"], optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } +eui48-1 = { version = "1.0", package = "eui48", optional = true } geo-types-06 = { version = "0.6", package = "geo-types", optional = true } geo-types-0_7 = { version = "0.7", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } diff --git a/postgres-types/src/eui48_1.rs b/postgres-types/src/eui48_1.rs new file mode 100644 index 000000000..4c35e63ce --- /dev/null +++ b/postgres-types/src/eui48_1.rs @@ -0,0 +1,27 @@ +use bytes::BytesMut; +use eui48_1::MacAddress; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for MacAddress { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let 
bytes = types::macaddr_from_sql(raw)?; + Ok(MacAddress::new(bytes)) + } + + accepts!(MACADDR); +} + +impl ToSql for MacAddress { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let mut bytes = [0; 6]; + bytes.copy_from_slice(self.as_bytes()); + types::macaddr_to_sql(bytes, w); + Ok(IsNull::No) + } + + accepts!(MACADDR); + to_sql_checked!(); +} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 5c483bd76..ed6f75cf5 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -194,6 +194,8 @@ mod bit_vec_06; mod chrono_04; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; +#[cfg(feature = "with-eui48-1")] +mod eui48_1; #[cfg(feature = "with-geo-types-0_6")] mod geo_types_06; #[cfg(feature = "with-geo-types-0_7")] diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 18219782d..c7c0746f0 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -24,6 +24,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] +with-eui48-1 = ["tokio-postgres/with-eui48-1"] with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 4513aeef7..7d96bfd9f 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -55,7 +55,8 @@ //! | ------- | ----------- | ------------------ | ------- | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | -//! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | //! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. 
| [serde_json](https://crates.io/crates/serde_json) 1.0 | no | diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 780c31963..fa1b50397 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -30,6 +30,7 @@ runtime = ["tokio/net", "tokio/time"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] +with-eui48-1 = ["postgres-types/with-eui48-1"] with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] @@ -61,6 +62,7 @@ criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } eui48-04 = { version = "0.4", package = "eui48" } +eui48-1 = { version = "1.0", package = "eui48" } geo-types-06 = { version = "0.6", package = "geo-types" } geo-types-07 = { version = "0.7", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 77713bb11..6dd0b0151 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -106,7 +106,8 @@ //! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 1.0 with the features `net` and `time` | yes | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | -//! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | //! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. 
| [serde_json](https://crates.io/crates/serde_json) 1.0 | no | diff --git a/tokio-postgres/tests/test/types/eui48_1.rs b/tokio-postgres/tests/test/types/eui48_1.rs new file mode 100644 index 000000000..0c22e9e87 --- /dev/null +++ b/tokio-postgres/tests/test/types/eui48_1.rs @@ -0,0 +1,18 @@ +use eui48_1::MacAddress; + +use crate::types::test_type; + +#[tokio::test] +async fn test_eui48_params() { + test_type( + "MACADDR", + &[ + ( + Some(MacAddress::parse_str("12-34-56-AB-CD-EF").unwrap()), + "'12-34-56-ab-cd-ef'", + ), + (None, "NULL"), + ], + ) + .await +} diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index bc31ece71..85eed0e27 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -19,6 +19,8 @@ mod bit_vec_06; mod chrono_04; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; +#[cfg(feature = "with-eui48-1")] +mod eui48_1; #[cfg(feature = "with-geo-types-0_6")] mod geo_types_06; #[cfg(feature = "with-geo-types-0_7")] From 57cacb65fe24762ccfc7889c06b9e906a6408588 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 21 Jun 2021 21:30:41 -0400 Subject: [PATCH 517/819] Upgrade phf --- codegen/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/error/sqlstate.rs | 552 +++++++++++++-------------- 3 files changed, 278 insertions(+), 278 deletions(-) diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index 8ff4d58be..fc02751cf 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -phf_codegen = "0.8" +phf_codegen = "0.9" regex = "1.0" marksman_escape = "0.1" linked-hash-map = "0.5" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index fa1b50397..3a1537a87 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -47,7 +47,7 @@ log = "0.4" parking_lot = "0.11" percent-encoding = "2.0" pin-project-lite = "0.2" -phf = "0.8" +phf = "0.9" postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } postgres-types = { version = "0.2.1", path = "../postgres-types" } socket2 = "0.4" diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 125124d12..1996d9b13 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -1340,319 +1340,319 @@ enum Inner { #[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { - key: 732231254413039614, + key: 12913932095322966823, disps: ::phf::Slice::Static(&[ - (0, 6), + (0, 12), + (0, 18), + (0, 25), + (0, 109), + (0, 147), + (0, 74), (0, 0), - (0, 218), - (0, 11), - (0, 31), - (0, 91), - (0, 55), + (7, 117), + (5, 221), + (0, 26), + (1, 45), + (0, 93), + (0, 25), + (0, 61), + (1, 221), + (10, 17), (0, 77), - (0, 72), + (2, 3), + (0, 216), + (0, 0), (0, 1), - (0, 73), - (1, 159), - (4, 4), - (0, 18), - (2, 100), - (0, 19), - (0, 16), - (0, 22), - (0, 51), + (1, 168), + (0, 64), + (0, 2), + (0, 7), + (1, 37), + (0, 83), + (3, 24), (0, 0), + (0, 109), + (18, 9), + (1, 230), (0, 0), - (1, 2), - (2, 177), - (0, 10), - (1, 192), + (0, 4), + (0, 171), (0, 0), - (5, 245), - (0, 106), - (6, 243), - (47, 195), - (0, 146), - (4, 154), - (0, 2), - (4, 78), - (0, 196), - (0, 8), - (2, 146), - (0, 15), - (0, 170), - (0, 5), - (10, 18), - (0, 30), - (0, 33), - (0, 2), + (34, 97), + (2, 126), + (44, 49), + (5, 182), + (0, 1), + (0, 1), + (0, 71), + (0, 4), + (5, 164), (0, 0), - (47, 181), - (0, 144), - (39, 231), - (39, 173), - (0, 57), - 
(0, 7), - (1, 154), + (0, 96), + (13, 58), + (0, 58), + (0, 242), + (0, 72), + (16, 53), ]), entries: ::phf::Slice::Static(&[ - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("01000", SqlState::WARNING), - ("02000", SqlState::NO_DATA), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("HV000", SqlState::FDW_ERROR), - ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), - ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("57P01", SqlState::ADMIN_SHUTDOWN), ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("22027", SqlState::TRIM_ERROR), - ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), - ("42804", SqlState::DATATYPE_MISMATCH), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), ("42703", SqlState::UNDEFINED_COLUMN), - ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), - ("42P12", SqlState::INVALID_DATABASE_DEFINITION), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("58030", SqlState::IO_ERROR), - ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), - ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("42P20", SqlState::WINDOWING_ERROR), - ("3D000", SqlState::INVALID_CATALOG_NAME), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), - ("F0000", SqlState::CONFIG_FILE_ERROR), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), - ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("57014", SqlState::QUERY_CANCELED), - ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("42939", SqlState::RESERVED_NAME), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("53200", SqlState::OUT_OF_MEMORY), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("42P16", SqlState::INVALID_TABLE_DEFINITION), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("53100", SqlState::DISK_FULL), - ("P0003", SqlState::TOO_MANY_ROWS), - ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), - ("53000", 
SqlState::INSUFFICIENT_RESOURCES), - ("23502", SqlState::NOT_NULL_VIOLATION), - ("XX000", SqlState::INTERNAL_ERROR), - ("58P01", SqlState::UNDEFINED_FILE), - ("42601", SqlState::SYNTAX_ERROR), - ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("42P03", SqlState::DUPLICATE_CURSOR), - ("XX002", SqlState::INDEX_CORRUPTED), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), - ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("42P21", SqlState::COLLATION_MISMATCH), ("42P07", SqlState::DUPLICATE_TABLE), - ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), - ("XX001", SqlState::DATA_CORRUPTED), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("22032", SqlState::INVALID_JSON_TEXT), ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("42602", SqlState::INVALID_NAME), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("57P03", SqlState::CANNOT_CONNECT_NOW), - ("58P02", SqlState::DUPLICATE_FILE), - ("42P22", SqlState::INDETERMINATE_COLLATION), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), - ("2200S", SqlState::INVALID_XML_COMMENT), - ("2200M", SqlState::INVALID_XML_DOCUMENT), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), + ("42P02", SqlState::UNDEFINED_PARAMETER), + ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("02000", SqlState::NO_DATA), + ("24000", SqlState::INVALID_CURSOR_STATE), + ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), + ("42601", SqlState::SYNTAX_ERROR), + ("22012", SqlState::DIVISION_BY_ZERO), + ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), + ("P0003", SqlState::TOO_MANY_ROWS), + ("57P04", SqlState::DATABASE_DROPPED), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("42P08", SqlState::AMBIGUOUS_PARAMETER), + ("3F000", SqlState::INVALID_SCHEMA_NAME), + ("42883", SqlState::UNDEFINED_FUNCTION), + ("20000", SqlState::CASE_NOT_FOUND), + ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), + ("42939", SqlState::RESERVED_NAME), + ("42602", SqlState::INVALID_NAME), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), + ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("428C9", SqlState::GENERATED_ALWAYS), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), 
- ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("54001", SqlState::STATEMENT_TOO_COMPLEX), + ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), + ("53100", SqlState::DISK_FULL), + ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), + ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), ("42712", SqlState::DUPLICATE_ALIAS), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("42710", SqlState::DUPLICATE_OBJECT), - ("54011", SqlState::TOO_MANY_COLUMNS), - ("42P19", SqlState::INVALID_RECURSION), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("42622", SqlState::NAME_TOO_LONG), + ("22035", SqlState::NO_SQL_JSON_ITEM), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + ("01000", SqlState::WARNING), + ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), + ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), + ("57P01", SqlState::ADMIN_SHUTDOWN), + ("2202E", SqlState::ARRAY_ELEMENT_ERROR), + ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), + ("0F000", SqlState::LOCATOR_EXCEPTION), + ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), + ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), ("57000", SqlState::OPERATOR_INTERVENTION), ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), - ("P0002", SqlState::NO_DATA_FOUND), + ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("42725", SqlState::AMBIGUOUS_FUNCTION), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), + ("22032", SqlState::INVALID_JSON_TEXT), + ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), + ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), + ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), + ("428C9", SqlState::GENERATED_ALWAYS), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("22P01", SqlState::FLOATING_POINT_EXCEPTION), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("22012", SqlState::DIVISION_BY_ZERO), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("34000", SqlState::INVALID_CURSOR_NAME), + ("42846", SqlState::CANNOT_COERCE), + ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("42704", SqlState::UNDEFINED_OBJECT), + ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("54001", SqlState::STATEMENT_TOO_COMPLEX), + ("53200", SqlState::OUT_OF_MEMORY), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), + ("22022", SqlState::INDICATOR_OVERFLOW), + ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), + ("XX000", SqlState::INTERNAL_ERROR), + ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), + ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), + ("25P01", 
SqlState::NO_ACTIVE_SQL_TRANSACTION), + ("23505", SqlState::UNIQUE_VIOLATION), + ("3B000", SqlState::SAVEPOINT_EXCEPTION), + ("F0000", SqlState::CONFIG_FILE_ERROR), + ("54011", SqlState::TOO_MANY_COLUMNS), + ("XX002", SqlState::INDEX_CORRUPTED), + ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), + ("42P12", SqlState::INVALID_DATABASE_DEFINITION), ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("55006", SqlState::OBJECT_IN_USE), + ("42P01", SqlState::UNDEFINED_TABLE), ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), - ("P0000", SqlState::PLPGSQL_ERROR), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("23505", SqlState::UNIQUE_VIOLATION), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), + ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), + ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), ("P0004", SqlState::ASSERT_FAILURE), - ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("58000", SqlState::SYSTEM_ERROR), + ("42P21", SqlState::COLLATION_MISMATCH), + ("57P02", SqlState::CRASH_SHUTDOWN), + ("42830", SqlState::INVALID_FOREIGN_KEY), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), + ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), + ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), + ("28P01", SqlState::INVALID_PASSWORD), + ("22011", SqlState::SUBSTRING_ERROR), + ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("08006", SqlState::CONNECTION_FAILURE), + ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), ("21000", SqlState::CARDINALITY_VIOLATION), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), - ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), + ("42803", SqlState::GROUPING_ERROR), + ("00000", SqlState::SUCCESSFUL_COMPLETION), + ("42P16", SqlState::INVALID_TABLE_DEFINITION), ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("57P03", SqlState::CANNOT_CONNECT_NOW), + ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("23P01", SqlState::EXCLUSION_VIOLATION), - ("42P04", SqlState::DUPLICATE_DATABASE), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), - ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), - ("00000", SqlState::SUCCESSFUL_COMPLETION), - ("58000", SqlState::SYSTEM_ERROR), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), + 
("42P06", SqlState::DUPLICATE_SCHEMA), + ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), + ("2200S", SqlState::INVALID_XML_COMMENT), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("42P03", SqlState::DUPLICATE_CURSOR), + ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), + ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), + ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + ("0L000", SqlState::INVALID_GRANTOR), ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), - ("42846", SqlState::CANNOT_COERCE), - ("22035", SqlState::NO_SQL_JSON_ITEM), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("20000", SqlState::CASE_NOT_FOUND), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), + ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), + ("58P01", SqlState::UNDEFINED_FILE), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), ("22000", SqlState::DATA_EXCEPTION), + ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), + ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("42P19", SqlState::INVALID_RECURSION), + ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("42702", SqlState::AMBIGUOUS_COLUMN), + ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("22008", SqlState::DATETIME_FIELD_OVERFLOW), + ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("3B001", SqlState::S_E_INVALID_SPECIFICATION), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), + ("P0000", SqlState::PLPGSQL_ERROR), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), + ("22019", SqlState::INVALID_ESCAPE_CHARACTER), + ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("42710", SqlState::DUPLICATE_OBJECT), + ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("HV000", SqlState::FDW_ERROR), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("2200N", SqlState::INVALID_XML_CONTENT), + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), + ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("42P09", SqlState::AMBIGUOUS_ALIAS), + ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), + ("23502", SqlState::NOT_NULL_VIOLATION), + ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), + ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), + ("0A000", SqlState::FEATURE_NOT_SUPPORTED), + ("57014", SqlState::QUERY_CANCELED), + ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), + ("0F001", SqlState::L_E_INVALID_SPECIFICATION), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("42701", SqlState::DUPLICATE_COLUMN), + ("42611", SqlState::INVALID_COLUMN_DEFINITION), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), 
+ ("23P01", SqlState::EXCLUSION_VIOLATION), + ("F0001", SqlState::LOCK_FILE_EXISTS), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("54023", SqlState::TOO_MANY_ARGUMENTS), ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), - ("55006", SqlState::OBJECT_IN_USE), - ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), - ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), + ("42P04", SqlState::DUPLICATE_DATABASE), + ("22027", SqlState::TRIM_ERROR), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("P0001", SqlState::RAISE_EXCEPTION), + ("HV014", SqlState::FDW_TOO_MANY_HANDLES), + ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("3D000", SqlState::INVALID_CATALOG_NAME), ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("08006", SqlState::CONNECTION_FAILURE), - ("42P01", SqlState::UNDEFINED_TABLE), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ("0L000", SqlState::INVALID_GRANTOR), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), + ("08000", SqlState::CONNECTION_EXCEPTION), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("22P04", SqlState::BAD_COPY_FILE_FORMAT), + ("XX001", SqlState::DATA_CORRUPTED), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("23514", SqlState::CHECK_VIOLATION), + ("42809", SqlState::WRONG_OBJECT_TYPE), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), + ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), + ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("55P03", SqlState::LOCK_NOT_AVAILABLE), + ("42P22", SqlState::INDETERMINATE_COLLATION), + ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), + ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("23001", SqlState::RESTRICT_VIOLATION), + ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("22031", SqlState::INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), + ("2200D", SqlState::INVALID_ESCAPE_OCTET), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), + ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), ("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("42622", SqlState::NAME_TOO_LONG), - ("57P04", SqlState::DATABASE_DROPPED), - ("42803", SqlState::GROUPING_ERROR), - ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("58P02", SqlState::DUPLICATE_FILE), + ("HV00P", SqlState::FDW_NO_SCHEMAS), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("42P20", SqlState::WINDOWING_ERROR), + ("25000", SqlState::INVALID_TRANSACTION_STATE), ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("2200N", SqlState::INVALID_XML_CONTENT), - ("57P02", SqlState::CRASH_SHUTDOWN), + ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), + ("40000", SqlState::TRANSACTION_ROLLBACK), + ("58030", SqlState::IO_ERROR), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), + ("22007", SqlState::INVALID_DATETIME_FORMAT), ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("2202E", SqlState::ARRAY_ELEMENT_ERROR), - ("22024", 
SqlState::UNTERMINATED_C_STRING), + ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), + ("P0002", SqlState::NO_DATA_FOUND), + ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), + ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), + ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), ("08P01", SqlState::PROTOCOL_VIOLATION), - ("22023", SqlState::INVALID_PARAMETER_VALUE), - ("22031", SqlState::INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), - ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("23514", SqlState::CHECK_VIOLATION), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), - ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), - ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), ("42723", SqlState::DUPLICATE_FUNCTION), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), - ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("23001", SqlState::RESTRICT_VIOLATION), - ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("28P01", SqlState::INVALID_PASSWORD), - ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), - ("P0001", SqlState::RAISE_EXCEPTION), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("42704", SqlState::UNDEFINED_OBJECT), - ("22022", SqlState::INDICATOR_OVERFLOW), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("42701", SqlState::DUPLICATE_COLUMN), - ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), - ("22011", SqlState::SUBSTRING_ERROR), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), + ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), + ("42804", SqlState::DATATYPE_MISMATCH), ]), }; From 8f7481a86cb4c4cf5658a0c8ba2cbe3cd7cb6d54 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 23 Jun 2021 20:02:51 -0400 Subject: [PATCH 518/819] fix clippy --- postgres-protocol/src/types/test.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/postgres-protocol/src/types/test.rs b/postgres-protocol/src/types/test.rs index 8796ab31b..09edeee3a 100644 --- a/postgres-protocol/src/types/test.rs +++ b/postgres-protocol/src/types/test.rs @@ -6,6 +6,7 @@ use super::*; use crate::IsNull; #[test] +#[allow(clippy::bool_assert_comparison)] fn bool() { let mut buf = BytesMut::new(); bool_to_sql(true, &mut buf); @@ -113,7 +114,7 @@ fn array() { .unwrap(); let array = 
array_from_sql(&buf).unwrap(); - assert_eq!(array.has_nulls(), true); + assert!(array.has_nulls()); assert_eq!(array.element_type(), 10); assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); assert_eq!(array.values().collect::>().unwrap(), values); @@ -150,7 +151,7 @@ fn non_null_array() { .unwrap(); let array = array_from_sql(&buf).unwrap(); - assert_eq!(array.has_nulls(), false); + assert!(array.has_nulls()); assert_eq!(array.element_type(), 10); assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); assert_eq!(array.values().collect::>().unwrap(), values); From 3eb5a4dab94e95df071949a31e7765031511c8cf Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 23 Jun 2021 20:15:57 -0400 Subject: [PATCH 519/819] actually fix clippy --- postgres-protocol/src/types/test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/src/types/test.rs b/postgres-protocol/src/types/test.rs index 09edeee3a..7c20cf3ed 100644 --- a/postgres-protocol/src/types/test.rs +++ b/postgres-protocol/src/types/test.rs @@ -151,7 +151,7 @@ fn non_null_array() { .unwrap(); let array = array_from_sql(&buf).unwrap(); - assert!(array.has_nulls()); + assert!(!array.has_nulls()); assert_eq!(array.element_type(), 10); assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); assert_eq!(array.values().collect::>().unwrap(), values); From 3b7b8000ce2d135883c248dd11d02e376a4aa678 Mon Sep 17 00:00:00 2001 From: JR Smith Date: Thu, 1 Jul 2021 16:04:19 -0400 Subject: [PATCH 520/819] Made requirement of setting feature flags to access derive macros more explicit in the documentation. --- postgres-types/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index ed6f75cf5..1973f3d0e 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -6,7 +6,12 @@ //! # Derive //! //! If the `derive` cargo feature is enabled, you can derive `ToSql` and `FromSql` implementations for custom Postgres -//! types. +//! types. Explicitly, modify your `Cargo.toml` file to include the following: +//! +//! ```toml +//! [dependencies] +//! postgres-types = { version = "0.X.X", features = ["derive"] } +//! ``` //! //! ## Enums //! From 6c1542f634ae0d7733811024cd63835045f75784 Mon Sep 17 00:00:00 2001 From: Lukas Kalbertodt Date: Tue, 13 Jul 2021 11:21:11 +0200 Subject: [PATCH 521/819] Add `FromSql` and `ToSql` impls for arrays (guarded behind feature) This is feature-gated because those impls require Rust 1.51. 
--- postgres-types/Cargo.toml | 2 + postgres-types/src/lib.rs | 66 ++++++++++++++++++++++++-- postgres/Cargo.toml | 1 + tokio-postgres/Cargo.toml | 1 + tokio-postgres/tests/test/types/mod.rs | 14 +++++- 5 files changed, 79 insertions(+), 5 deletions(-) diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index b258cee12..4fd69f613 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -12,6 +12,7 @@ categories = ["database"] [features] derive = ["postgres-derive"] +array-impls = ["array-init"] with-bit-vec-0_6 = ["bit-vec-06"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] @@ -28,6 +29,7 @@ fallible-iterator = "0.2" postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } +array-init = { version = "2", optional = true } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = ["clock"], optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 1973f3d0e..4c559b95a 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -428,8 +428,10 @@ impl WrongType { /// /// # Arrays /// -/// `FromSql` is implemented for `Vec` where `T` implements `FromSql`, and -/// corresponds to one-dimensional Postgres arrays. +/// `FromSql` is implemented for `Vec` and `[T; N]` where `T` implements +/// `FromSql`, and corresponds to one-dimensional Postgres arrays. **Note:** +/// the impl for arrays only exist when the Cargo feature `array-impls` is +/// enabled. pub trait FromSql<'a>: Sized { /// Creates a new value of this type from a buffer of data of the specified /// Postgres `Type` in its binary format. @@ -513,6 +515,47 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Vec { } } +#[cfg(feature = "array-impls")] +impl<'a, T: FromSql<'a>, const N: usize> FromSql<'a> for [T; N] { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + let member_type = match *ty.kind() { + Kind::Array(ref member) => member, + _ => panic!("expected array type"), + }; + + let array = types::array_from_sql(raw)?; + if array.dimensions().count()? > 1 { + return Err("array contains too many dimensions".into()); + } + + let mut values = array.values(); + let out = array_init::try_array_init(|i| { + let v = values + .next()? + .ok_or_else(|| -> Box { + format!("too few elements in array (expected {}, got {})", N, i).into() + })?; + T::from_sql_nullable(member_type, v) + })?; + if values.next()?.is_some() { + return Err(format!( + "excess elements in array (expected {}, got more than that)", + N, + ) + .into()); + } + + Ok(out) + } + + fn accepts(ty: &Type) -> bool { + match *ty.kind() { + Kind::Array(ref inner) => T::accepts(inner), + _ => false, + } + } +} + impl<'a> FromSql<'a> for Vec { fn from_sql(_: &Type, raw: &'a [u8]) -> Result, Box> { Ok(types::bytea_from_sql(raw).to_owned()) @@ -691,8 +734,10 @@ pub enum IsNull { /// /// # Arrays /// -/// `ToSql` is implemented for `Vec` and `&[T]` where `T` implements `ToSql`, -/// and corresponds to one-dimensional Postgres arrays with an index offset of 1. +/// `ToSql` is implemented for `Vec`, `&[T]` and `[T; N]` where `T` +/// implements `ToSql`, and corresponds to one-dimensional Postgres arrays with +/// an index offset of 1. **Note:** the impl for arrays only exist when the +/// Cargo feature `array-impls` is enabled. 
pub trait ToSql: fmt::Debug { /// Converts the value of `self` into the binary format of the specified /// Postgres `Type`, appending it to `out`. @@ -808,6 +853,19 @@ impl<'a> ToSql for &'a [u8] { to_sql_checked!(); } +#[cfg(feature = "array-impls")] +impl ToSql for [T; N] { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&[T] as ToSql>::to_sql(&&self[..], ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&[T] as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + impl ToSql for Vec { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[T] as ToSql>::to_sql(&&**self, ty, w) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index c7c0746f0..ca1d0b232 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -21,6 +21,7 @@ all-features = true circle-ci = { repository = "sfackler/rust-postgres" } [features] +array-impls = ["tokio-postgres/array-impls"] with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 3a1537a87..edffde49a 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -27,6 +27,7 @@ circle-ci = { repository = "sfackler/rust-postgres" } default = ["runtime"] runtime = ["tokio/net", "tokio/time"] +array-impls = ["postgres-types/array-impls"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 85eed0e27..54a111b3a 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -350,7 +350,7 @@ async fn test_hstore_params() { } #[tokio::test] -async fn test_array_params() { +async fn test_array_vec_params() { test_type( "integer[]", &[ @@ -363,6 +363,18 @@ async fn test_array_params() { .await; } +#[cfg(feature = "array-impls")] +#[tokio::test] +async fn test_array_array_params() { + test_type("integer[]", &[(Some([1i32, 2i32]), "ARRAY[1,2]")]).await; + test_type("text[]", &[(Some(["peter".to_string()]), "ARRAY['peter']")]).await; + test_type( + "integer[]", + &[(Some([] as [i32; 0]), "ARRAY[]"), (None, "NULL")], + ) + .await; +} + #[allow(clippy::eq_op)] async fn test_nan_param(sql_type: &str) where From 06952e2bb09bdcc404b82f24774c5ada756a360f Mon Sep 17 00:00:00 2001 From: Lukas Kalbertodt Date: Tue, 13 Jul 2021 14:38:11 +0200 Subject: [PATCH 522/819] Use Rust 1.51 in CI We needed to bump the version because the `array-impls` feature requires const generics. --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4a95dbe0c..8b3a3420d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,7 +59,7 @@ jobs: - uses: actions/checkout@v2 - uses: sfackler/actions/rustup@master with: - version: 1.46.0 + version: 1.51.0 - run: echo "::set-output name=version::$(rustc --version)" id: rust-version - uses: actions/cache@v1 From 24928ebce3b7b5480d33a0593b8d89c1c4a11081 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jul 2021 13:10:25 +0000 Subject: [PATCH 523/819] Update env_logger requirement from 0.8 to 0.9 Updates the requirements on [env_logger](https://github.com/env-logger-rs/env_logger) to permit the latest version. 
- [Release notes](https://github.com/env-logger-rs/env_logger/releases) - [Changelog](https://github.com/env-logger-rs/env_logger/blob/main/CHANGELOG.md) - [Commits](https://github.com/env-logger-rs/env_logger/compare/v0.8.0...v0.9.0) --- updated-dependencies: - dependency-name: env_logger dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index edffde49a..db3a65f32 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -57,7 +57,7 @@ tokio-util = { version = "0.6", features = ["codec"] } [dev-dependencies] tokio = { version = "1.0", features = ["full"] } -env_logger = "0.8" +env_logger = "0.9" criterion = "0.3" bit-vec-06 = { version = "0.6", package = "bit-vec" } From da4e323578e868ff720d409e7c63dc41fa00bd3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ale=C5=A1=20Bizjak?= Date: Sun, 25 Jul 2021 22:02:28 +0200 Subject: [PATCH 524/819] Implement BorrowToSql for an additional type. Closes #811. --- postgres-types/src/lib.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 4c559b95a..59ec4f811 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -1042,6 +1042,19 @@ impl BorrowToSql for &dyn ToSql { } } +impl sealed::Sealed for &(dyn ToSql + Sync) {} + +/// In async contexts it is sometimes necessary to have the additional +/// Sync requirement on parameters for queries since this enables the +/// resulting Futures to be Send, hence usable in, e.g., tokio::spawn. +/// This instance is provided for those cases. +impl BorrowToSql for &(dyn ToSql + Sync) { + #[inline] + fn borrow_to_sql(&self) -> &dyn ToSql { + *self + } +} + impl sealed::Sealed for T where T: ToSql {} impl BorrowToSql for T From be0c85ac0b985986b7ffdf8e68cc91294481c2e2 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 10 Aug 2021 20:30:27 -0400 Subject: [PATCH 525/819] Update phf --- codegen/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/error/sqlstate.rs | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index fc02751cf..14bebccf2 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -phf_codegen = "0.9" +phf_codegen = "0.10" regex = "1.0" marksman_escape = "0.1" linked-hash-map = "0.5" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index db3a65f32..1bedf6b57 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -48,7 +48,7 @@ log = "0.4" parking_lot = "0.11" percent-encoding = "2.0" pin-project-lite = "0.2" -phf = "0.9" +phf = "0.10" postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } postgres-types = { version = "0.2.1", path = "../postgres-types" } socket2 = "0.4" diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 1996d9b13..71648a948 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -1341,7 +1341,7 @@ enum Inner { static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { key: 12913932095322966823, - disps: ::phf::Slice::Static(&[ + disps: &[ (0, 12), (0, 18), (0, 25), @@ -1394,8 +1394,8 @@ static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = (0, 242), (0, 72), (16, 53), - ]), - entries: ::phf::Slice::Static(&[ + 
], + entries: &[ ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), ("40P01", SqlState::T_R_DEADLOCK_DETECTED), ("42703", SqlState::UNDEFINED_COLUMN), @@ -1654,5 +1654,5 @@ static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), ("42804", SqlState::DATATYPE_MISMATCH), - ]), + ], }; From a8a35eb6db62b878b168c5c53110be8d6a393b4c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 10 Aug 2021 21:07:20 -0400 Subject: [PATCH 526/819] fix clippy --- postgres-derive/src/tosql.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index a1c87b0ff..1808e787d 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -30,7 +30,7 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { .. }) if fields.unnamed.len() == 1 => { let field = fields.unnamed.first().unwrap(); - (accepts::domain_body(&name, &field), domain_body()) + (accepts::domain_body(&name, field), domain_body()) } Data::Struct(DataStruct { fields: Fields::Named(ref fields), From 3e4be865318ddd4a6b4493d689703db32ca3d184 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 10 Aug 2021 21:17:50 -0400 Subject: [PATCH 527/819] more clippy --- postgres-types/src/lib.rs | 2 +- tokio-postgres/src/config.rs | 8 ++++---- tokio-postgres/src/prepare.rs | 8 ++++---- tokio-postgres/src/transaction.rs | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 59ec4f811..4dd87c71c 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -909,7 +909,7 @@ impl<'a> ToSql for &'a str { impl<'a> ToSql for Cow<'a, str> { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { - <&str as ToSql>::to_sql(&&self.as_ref(), ty, w) + <&str as ToSql>::to_sql(&self.as_ref(), ty, w) } fn accepts(ty: &Type) -> bool { diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 111487173..eb4e5bdc5 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -390,19 +390,19 @@ impl Config { fn param(&mut self, key: &str, value: &str) -> Result<(), Error> { match key { "user" => { - self.user(&value); + self.user(value); } "password" => { self.password(value); } "dbname" => { - self.dbname(&value); + self.dbname(value); } "options" => { - self.options(&value); + self.options(value); } "application_name" => { - self.application_name(&value); + self.application_name(value); } "sslmode" => { let mode = match value { diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 49397debf..7a6163415 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -86,7 +86,7 @@ pub async fn prepare( let mut parameters = vec![]; let mut it = parameter_description.parameters(); while let Some(oid) = it.next().map_err(Error::parse)? { - let type_ = get_type(&client, oid).await?; + let type_ = get_type(client, oid).await?; parameters.push(type_); } @@ -94,13 +94,13 @@ pub async fn prepare( if let Some(row_description) = row_description { let mut it = row_description.fields(); while let Some(field) = it.next().map_err(Error::parse)? 
{ - let type_ = get_type(&client, field.type_oid()).await?; + let type_ = get_type(client, field.type_oid()).await?; let column = Column::new(field.name().to_string(), type_); columns.push(column); } } - Ok(Statement::new(&client, name, parameters, columns)) + Ok(Statement::new(client, name, parameters, columns)) } fn prepare_rec<'a>( @@ -120,7 +120,7 @@ fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Resu client.with_buf(|buf| { frontend::parse(name, query, types.iter().map(Type::oid), buf).map_err(Error::encode)?; - frontend::describe(b'S', &name, buf).map_err(Error::encode)?; + frontend::describe(b'S', name, buf).map_err(Error::encode)?; frontend::sync(buf); Ok(buf.split().freeze()) }) diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index a1aa7611f..b72b119bf 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -201,7 +201,7 @@ impl<'a> Transaction<'a> { I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let statement = statement.__convert().into_statement(&self.client).await?; + let statement = statement.__convert().into_statement(self.client).await?; bind::bind(self.client.inner(), statement, params).await } From be0d71fad51b9ea070493ed44a2ab15557635b85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lauren=C8=9Biu=20Nicola?= Date: Thu, 23 Sep 2021 19:28:02 +0300 Subject: [PATCH 528/819] Add support for time 0.3 --- postgres-types/Cargo.toml | 2 + postgres-types/src/lib.rs | 2 + postgres-types/src/time_03.rs | 108 +++++++++++++++ postgres/Cargo.toml | 1 + postgres/src/lib.rs | 3 +- tokio-postgres/Cargo.toml | 3 +- tokio-postgres/src/lib.rs | 3 +- tokio-postgres/tests/test/types/mod.rs | 2 + tokio-postgres/tests/test/types/time_03.rs | 149 +++++++++++++++++++++ 9 files changed, 270 insertions(+), 3 deletions(-) create mode 100644 postgres-types/src/time_03.rs create mode 100644 tokio-postgres/tests/test/types/time_03.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 4fd69f613..8fc6ed107 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -22,6 +22,7 @@ with-geo-types-0_7 = ["geo-types-0_7"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_8 = ["uuid-08"] with-time-0_2 = ["time-02"] +with-time-0_3 = ["time-03"] [dependencies] bytes = "1.0" @@ -40,3 +41,4 @@ serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } time-02 = { version = "0.2", package = "time", optional = true } +time-03 = { version = "0.3", package = "time", default-features = false, optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 4dd87c71c..2a953db2f 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -209,6 +209,8 @@ mod geo_types_07; mod serde_json_1; #[cfg(feature = "with-time-0_2")] mod time_02; +#[cfg(feature = "with-time-0_3")] +mod time_03; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; diff --git a/postgres-types/src/time_03.rs b/postgres-types/src/time_03.rs new file mode 100644 index 000000000..f136fab7c --- /dev/null +++ b/postgres-types/src/time_03.rs @@ -0,0 +1,108 @@ +use bytes::BytesMut; +use postgres_protocol::types; +use std::convert::TryFrom; +use std::error::Error; +use time_03::{Date, Duration, OffsetDateTime, PrimitiveDateTime, Time, UtcOffset}; + +use crate::{FromSql, IsNull, ToSql, Type}; + +fn base() -> PrimitiveDateTime { + 
PrimitiveDateTime::new(Date::from_ordinal_date(2000, 1).unwrap(), Time::MIDNIGHT) +} + +impl<'a> FromSql<'a> for PrimitiveDateTime { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let t = types::timestamp_from_sql(raw)?; + Ok(base() + Duration::microseconds(t)) + } + + accepts!(TIMESTAMP); +} + +impl ToSql for PrimitiveDateTime { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let time = match i64::try_from((*self - base()).whole_microseconds()) { + Ok(time) => time, + Err(_) => return Err("value too large to transmit".into()), + }; + types::timestamp_to_sql(time, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMP); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for OffsetDateTime { + fn from_sql(type_: &Type, raw: &[u8]) -> Result> { + let primitive = PrimitiveDateTime::from_sql(type_, raw)?; + Ok(primitive.assume_utc()) + } + + accepts!(TIMESTAMPTZ); +} + +impl ToSql for OffsetDateTime { + fn to_sql( + &self, + type_: &Type, + w: &mut BytesMut, + ) -> Result> { + let utc_datetime = self.to_offset(UtcOffset::UTC); + let date = utc_datetime.date(); + let time = utc_datetime.time(); + let primitive = PrimitiveDateTime::new(date, time); + primitive.to_sql(type_, w) + } + + accepts!(TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Date { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let jd = types::date_from_sql(raw)?; + Ok(base().date() + Duration::days(i64::from(jd))) + } + + accepts!(DATE); +} + +impl ToSql for Date { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let jd = (*self - base().date()).whole_days(); + if jd > i64::from(i32::max_value()) || jd < i64::from(i32::min_value()) { + return Err("value too large to transmit".into()); + } + + types::date_to_sql(jd as i32, w); + Ok(IsNull::No) + } + + accepts!(DATE); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Time { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let usec = types::time_from_sql(raw)?; + Ok(Time::MIDNIGHT + Duration::microseconds(usec)) + } + + accepts!(TIME); +} + +impl ToSql for Time { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let delta = *self - Time::MIDNIGHT; + let time = match i64::try_from(delta.whole_microseconds()) { + Ok(time) => time, + Err(_) => return Err("value too large to transmit".into()), + }; + types::time_to_sql(time, w); + Ok(IsNull::No) + } + + accepts!(TIME); + to_sql_checked!(); +} diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index ca1d0b232..3d1c20234 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -31,6 +31,7 @@ with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] with-time-0_2 = ["tokio-postgres/with-time-0_2"] +with-time-0_3 = ["tokio-postgres/with-time-0_3"] [dependencies] bytes = "1.0" diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 7d96bfd9f..a599532e4 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -61,7 +61,8 @@ //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | -//! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | +//! 
| `with-time-0_2` | Enable support for the 0.2 version of the `time` crate. | [time](https://crates.io/crates/time/0.2.0) 0.2 | no | +//! | `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. | [time](https://crates.io/crates/time/0.3.0) 0.3 | no | #![warn(clippy::all, rust_2018_idioms, missing_docs)] pub use fallible_iterator; diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 1bedf6b57..d35a323a1 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -37,6 +37,7 @@ with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] with-time-0_2 = ["postgres-types/with-time-0_2"] +with-time-0_3 = ["postgres-types/with-time-0_3"] [dependencies] async-trait = "0.1" @@ -70,4 +71,4 @@ serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } uuid-08 = { version = "0.8", package = "uuid" } time-02 = { version = "0.2", package = "time" } - +time-03 = { version = "0.3", package = "time", features = ["parsing"] } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 6dd0b0151..e9516e0b3 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -112,7 +112,8 @@ //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | -//! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | +//! | `with-time-0_2` | Enable support for the 0.2 version of the `time` crate. | [time](https://crates.io/crates/time/0.2.0) 0.2 | no | +//! | `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. | [time](https://crates.io/crates/time/0.3.0) 0.3 | no | #![doc(html_root_url = "https://docs.rs/tokio-postgres/0.7")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 54a111b3a..604e2de32 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -29,6 +29,8 @@ mod geo_types_07; mod serde_json_1; #[cfg(feature = "with-time-0_2")] mod time_02; +#[cfg(feature = "with-time-0_3")] +mod time_03; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; diff --git a/tokio-postgres/tests/test/types/time_03.rs b/tokio-postgres/tests/test/types/time_03.rs new file mode 100644 index 000000000..df013c9bf --- /dev/null +++ b/tokio-postgres/tests/test/types/time_03.rs @@ -0,0 +1,149 @@ +use time_03::{format_description, OffsetDateTime, PrimitiveDateTime}; +use tokio_postgres::types::{Date, Timestamp}; + +use crate::types::test_type; + +// time 0.2 does not [yet?] 
support parsing fractional seconds +// https://github.com/time-rs/time/issues/226 + +#[tokio::test] +async fn test_primitive_date_time_params() { + fn make_check(time: &str) -> (Option, &str) { + let format = + format_description::parse("'[year]-[month]-[day] [hour]:[minute]:[second]'").unwrap(); + (Some(PrimitiveDateTime::parse(time, &format).unwrap()), time) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00'"), // .010000000 + make_check("'1965-09-25 11:19:33'"), // .100314000 + make_check("'2010-02-09 23:11:45'"), // .120200000 + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_primitive_date_time_params() { + fn make_check(time: &str) -> (Timestamp, &str) { + let format = + format_description::parse("'[year]-[month]-[day] [hour]:[minute]:[second]'").unwrap(); + ( + Timestamp::Value(PrimitiveDateTime::parse(time, &format).unwrap()), + time, + ) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00'"), // .010000000 + make_check("'1965-09-25 11:19:33'"), // .100314000 + make_check("'2010-02-09 23:11:45'"), // .120200000 + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_offset_date_time_params() { + fn make_check(time: &str) -> (Option, &str) { + let format = + format_description::parse("'[year]-[month]-[day] [hour]:[minute]:[second] [offset_hour sign:mandatory][offset_minute]'").unwrap(); + (Some(OffsetDateTime::parse(time, &format).unwrap()), time) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00 +0000'"), // .010000000 + make_check("'1965-09-25 11:19:33 +0000'"), // .100314000 + make_check("'2010-02-09 23:11:45 +0000'"), // .120200000 + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_offset_date_time_params() { + fn make_check(time: &str) -> (Timestamp, &str) { + let format = + format_description::parse("'[year]-[month]-[day] [hour]:[minute]:[second] [offset_hour sign:mandatory][offset_minute]'").unwrap(); + ( + Timestamp::Value(OffsetDateTime::parse(time, &format).unwrap()), + time, + ) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00 +0000'"), // .010000000 + make_check("'1965-09-25 11:19:33 +0000'"), // .100314000 + make_check("'2010-02-09 23:11:45 +0000'"), // .120200000 + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_date_params() { + fn make_check(date: &str) -> (Option, &str) { + let format = format_description::parse("'[year]-[month]-[day]'").unwrap(); + (Some(time_03::Date::parse(date, &format).unwrap()), date) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_date_params() { + fn make_check(date: &str) -> (Date, &str) { + let format = format_description::parse("'[year]-[month]-[day]'").unwrap(); + ( + Date::Value(time_03::Date::parse(date, &format).unwrap()), + date, + ) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (Date::PosInfinity, "'infinity'"), + (Date::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_time_params() { + fn make_check(time: &str) -> (Option, &str) { + let format = 
format_description::parse("'[hour]:[minute]:[second]'").unwrap(); + (Some(time_03::Time::parse(time, &format).unwrap()), time) + } + test_type( + "TIME", + &[ + make_check("'00:00:00'"), // .010000000 + make_check("'11:19:33'"), // .100314000 + make_check("'23:11:45'"), // .120200000 + (None, "NULL"), + ], + ) + .await; +} From 34d8b77644880dbaef1bcd7e8aa246dc59e90d11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lauren=C8=9Biu=20Nicola?= Date: Fri, 24 Sep 2021 08:41:18 +0300 Subject: [PATCH 529/819] Add feature gates for doctests --- postgres-native-tls/src/lib.rs | 6 ++++++ postgres-openssl/src/lib.rs | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/postgres-native-tls/src/lib.rs b/postgres-native-tls/src/lib.rs index 2f2e6e6ad..a06f185b5 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -4,10 +4,12 @@ //! //! ```no_run //! use native_tls::{Certificate, TlsConnector}; +//! # #[cfg(feature = "runtime")] //! use postgres_native_tls::MakeTlsConnector; //! use std::fs; //! //! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "runtime")] { //! let cert = fs::read("database_cert.pem")?; //! let cert = Certificate::from_pem(&cert)?; //! let connector = TlsConnector::builder() @@ -19,6 +21,7 @@ //! "host=localhost user=postgres sslmode=require", //! connector, //! ); +//! # } //! //! // ... //! # Ok(()) @@ -27,10 +30,12 @@ //! //! ```no_run //! use native_tls::{Certificate, TlsConnector}; +//! # #[cfg(feature = "runtime")] //! use postgres_native_tls::MakeTlsConnector; //! use std::fs; //! //! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "runtime")] { //! let cert = fs::read("database_cert.pem")?; //! let cert = Certificate::from_pem(&cert)?; //! let connector = TlsConnector::builder() @@ -42,6 +47,7 @@ //! "host=localhost user=postgres sslmode=require", //! connector, //! )?; +//! # } //! # Ok(()) //! # } //! ``` diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index f3c0b9309..49fc2807c 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -4,9 +4,11 @@ //! //! ```no_run //! use openssl::ssl::{SslConnector, SslMethod}; +//! # #[cfg(feature = "runtime")] //! use postgres_openssl::MakeTlsConnector; //! //! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "runtime")] { //! let mut builder = SslConnector::builder(SslMethod::tls())?; //! builder.set_ca_file("database_cert.pem")?; //! let connector = MakeTlsConnector::new(builder.build()); @@ -15,6 +17,7 @@ //! "host=localhost user=postgres sslmode=require", //! connector, //! ); +//! # } //! //! // ... //! # Ok(()) @@ -23,9 +26,11 @@ //! //! ```no_run //! use openssl::ssl::{SslConnector, SslMethod}; +//! # #[cfg(feature = "runtime")] //! use postgres_openssl::MakeTlsConnector; //! //! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "runtime")] { //! let mut builder = SslConnector::builder(SslMethod::tls())?; //! builder.set_ca_file("database_cert.pem")?; //! let connector = MakeTlsConnector::new(builder.build()); @@ -34,6 +39,7 @@ //! "host=localhost user=postgres sslmode=require", //! connector, //! )?; +//! # } //! //! // ... //! 
# Ok(()) From c7785d0b10bf629b1dbf915d24f370d5e11da4f4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 29 Sep 2021 19:24:46 -0400 Subject: [PATCH 530/819] Release postgres-types v0.2.2 --- postgres-types/CHANGELOG.md | 8 ++++++++ postgres-types/Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 83bc4d1fd..f8b2835eb 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,13 @@ # Change Log +## v0.2.2 - 2021-09-29 + +### Added + +* Added support for `eui48` 1.0 via the `with-eui48-1` feature. +* Added `ToSql` and `FromSql` implementations for array types via the `array-impls` feature. +* Added support for `time` 0.3 via the `with-time-0_3` feature. + ## v0.2.1 - 2021-04-03 ### Added diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 8fc6ed107..7eca3fbcf 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.2.1" +version = "0.2.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 349c38b1fe77eae6b27a2ca65d760bff984382e7 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 29 Sep 2021 19:27:15 -0400 Subject: [PATCH 531/819] Release postgres-protocol v0.6.2 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 7a51cb192..eb37f5883 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.6.2 - 2021-09-29 + +### Changed + +* Upgraded `hmac`. + ## v0.6.1 - 2021-04-03 ### Added diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index d4ae8c301..a4ed3e907 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.6.1" +version = "0.6.2" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" From 8542d078bfcd88dcb92ceae7ec9d17364586f3a2 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 29 Sep 2021 19:34:32 -0400 Subject: [PATCH 532/819] Release tokio-postgres v0.7.2 --- tokio-postgres/CHANGELOG.md | 13 +++++++++++++ tokio-postgres/Cargo.toml | 4 ++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 3a7aa2ae7..2c2ea5bc8 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,18 @@ # Change Log +## v0.7.2 - 2021-09-29 + +### Fixed + +* Fixed a deadlock when pipelined requests concurrently prepare cached typeinfo queries. + +### Added + +* Added `SimpleQueryRow::columns`. +* Added support for `eui48` 1.0 via the `with-eui48-1` feature. +* Added `FromSql` and `ToSql` implementations for arrays via the `array-impls` feature. +* Added support for `time` 0.3 via the `with-time-0_3` feature. 
+ ## v0.7.2 - 2021-04-25 ### Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index d35a323a1..65f65d641 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.2" +version = "0.7.3" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -51,7 +51,7 @@ percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.10" postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } -postgres-types = { version = "0.2.1", path = "../postgres-types" } +postgres-types = { version = "0.2.2", path = "../postgres-types" } socket2 = "0.4" tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.6", features = ["codec"] } From d45461614aca87022c17a2cc26b22325bf161fa5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 29 Sep 2021 19:38:29 -0400 Subject: [PATCH 533/819] Release postgres v0.19.2 --- postgres/CHANGELOG.md | 9 +++++++++ postgres/Cargo.toml | 4 ++-- tokio-postgres/CHANGELOG.md | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index e68dedc5b..6af8d914b 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,14 @@ # Change Log +## v0.19.2 - 2021-09-29 + +### Added + +* Added `SimpleQueryRow::columns`. +* Added support for `eui48` 1.0 via the `with-eui48-1` feature. +* Added `FromSql` and `ToSql` implementations for arrays via the `array-impls` feature. +* Added support for `time` 0.3 via the `with-time-0_3` feature. + ## v0.19.1 - 2021-04-03 ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 3d1c20234..b61e42aca 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.19.1" +version = "0.19.2" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -37,7 +37,7 @@ with-time-0_3 = ["tokio-postgres/with-time-0_3"] bytes = "1.0" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.7.1", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.2", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 2c2ea5bc8..9e70c0045 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,6 +1,6 @@ # Change Log -## v0.7.2 - 2021-09-29 +## v0.7.3 - 2021-09-29 ### Fixed From b2df11579f8b49728d3096b6bd6da0b7ab27ccf0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 19 Oct 2021 19:36:14 -0400 Subject: [PATCH 534/819] Fix commit-time error reporting Closes #832 --- tokio-postgres/src/query.rs | 25 ++++++++++++++----------- tokio-postgres/tests/test/main.rs | 26 ++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 11 deletions(-) diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index f139ed915..cdb952190 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -99,11 +99,12 @@ where }; let mut responses = start(client, buf).await?; + let mut rows = 0; loop { match responses.next().await? { Message::DataRow(_) => {} Message::CommandComplete(body) => { - let rows = body + rows = body .tag() .map_err(Error::parse)? 
.rsplit(' ') @@ -111,9 +112,9 @@ where .unwrap() .parse() .unwrap_or(0); - return Ok(rows); } - Message::EmptyQueryResponse => return Ok(0), + Message::EmptyQueryResponse => rows = 0, + Message::ReadyForQuery(_) => return Ok(rows), _ => return Err(Error::unexpected_message()), } } @@ -203,15 +204,17 @@ impl Stream for RowStream { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); - match ready!(this.responses.poll_next(cx)?) { - Message::DataRow(body) => { - Poll::Ready(Some(Ok(Row::new(this.statement.clone(), body)?))) + loop { + match ready!(this.responses.poll_next(cx)?) { + Message::DataRow(body) => { + return Poll::Ready(Some(Ok(Row::new(this.statement.clone(), body)?))) + } + Message::EmptyQueryResponse + | Message::CommandComplete(_) + | Message::PortalSuspended => {} + Message::ReadyForQuery(_) => return Poll::Ready(None), + _ => return Poll::Ready(Some(Err(Error::unexpected_message()))), } - Message::EmptyQueryResponse - | Message::CommandComplete(_) - | Message::PortalSuspended => Poll::Ready(None), - Message::ErrorResponse(body) => Poll::Ready(Some(Err(Error::db(body)))), - _ => Poll::Ready(Some(Err(Error::unexpected_message()))), } } } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index c0b4bf202..31d7fa295 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -805,3 +805,29 @@ async fn query_opt() { .err() .unwrap(); } + +#[tokio::test] +async fn deferred_constraint() { + let client = connect("user=postgres").await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE t ( + i INT, + UNIQUE (i) DEFERRABLE INITIALLY DEFERRED + ); + ", + ) + .await + .unwrap(); + + client + .execute("INSERT INTO t (i) VALUES (1)", &[]) + .await + .unwrap(); + client + .execute("INSERT INTO t (i) VALUES (1)", &[]) + .await + .unwrap_err(); +} From 0adcf58555fa1a5f42bdab512ea462ca993cad62 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 19 Oct 2021 19:58:49 -0400 Subject: [PATCH 535/819] Release tokio-postgres v0.7.4 --- tokio-postgres/CHANGELOG.md | 6 ++++++ tokio-postgres/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 9e70c0045..34b4fc1d9 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.7.4 - 2021-10-19 + +### Fixed + +* Fixed reporting of commit-time errors triggered by deferred constraints. 
+ ## v0.7.3 - 2021-09-29 ### Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 65f65d641..17286dc21 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.3" +version = "0.7.4" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From f6189a95f24af45068ecfd6e3c4e6b71ac8e43fe Mon Sep 17 00:00:00 2001 From: ilslv Date: Thu, 28 Oct 2021 12:10:30 +0300 Subject: [PATCH 536/819] Fix transaction not being rolled back on `Client::transaction()` `Future` dropped before completion --- tokio-postgres/src/client.rs | 41 +++++++++- tokio-postgres/tests/test/main.rs | 122 +++++++++++++++++++++++++++++- 2 files changed, 159 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 4a099d941..dea77da94 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,4 +1,4 @@ -use crate::codec::BackendMessages; +use crate::codec::{BackendMessages, FrontendMessage}; use crate::config::{Host, SslMode}; use crate::connection::{Request, RequestMessages}; use crate::copy_out::CopyOutStream; @@ -19,7 +19,7 @@ use fallible_iterator::FallibleIterator; use futures::channel::mpsc; use futures::{future, pin_mut, ready, StreamExt, TryStreamExt}; use parking_lot::Mutex; -use postgres_protocol::message::backend::Message; +use postgres_protocol::message::{backend::Message, frontend}; use postgres_types::BorrowToSql; use std::collections::HashMap; use std::fmt; @@ -488,7 +488,42 @@ impl Client { /// /// The transaction will roll back by default - use the `commit` method to commit it. pub async fn transaction(&mut self) -> Result, Error> { - self.batch_execute("BEGIN").await?; + struct RollbackIfNotDone<'me> { + client: &'me Client, + done: bool, + } + + impl<'a> Drop for RollbackIfNotDone<'a> { + fn drop(&mut self) { + if self.done { + return; + } + + let buf = self.client.inner().with_buf(|buf| { + frontend::query("ROLLBACK", buf).unwrap(); + buf.split().freeze() + }); + let _ = self + .client + .inner() + .send(RequestMessages::Single(FrontendMessage::Raw(buf))); + } + } + + // This is done, as `Future` created by this method can be dropped after + // `RequestMessages` is synchronously send to the `Connection` by + // `batch_execute()`, but before `Responses` is asynchronously polled to + // completion. In that case `Transaction` won't be created and thus + // won't be rolled back. + { + let mut cleaner = RollbackIfNotDone { + client: self, + done: false, + }; + self.batch_execute("BEGIN").await?; + cleaner.done = true; + } + Ok(Transaction::new(self)) } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 31d7fa295..dcfbc5308 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -3,9 +3,12 @@ use bytes::{Bytes, BytesMut}; use futures::channel::mpsc; use futures::{ - future, join, pin_mut, stream, try_join, FutureExt, SinkExt, StreamExt, TryStreamExt, + future, join, pin_mut, stream, try_join, Future, FutureExt, SinkExt, StreamExt, TryStreamExt, }; +use pin_project_lite::pin_project; use std::fmt::Write; +use std::pin::Pin; +use std::task::{Context, Poll}; use std::time::Duration; use tokio::net::TcpStream; use tokio::time; @@ -22,6 +25,35 @@ mod parse; mod runtime; mod types; +pin_project! { + /// Polls `F` at most `polls_left` times returning `Some(F::Output)` if + /// [`Future`] returned [`Poll::Ready`] or [`None`] otherwise. 
+ struct Cancellable { + #[pin] + fut: F, + polls_left: usize, + } +} + +impl Future for Cancellable { + type Output = Option; + + fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { + let this = self.project(); + match this.fut.poll(ctx) { + Poll::Ready(r) => Poll::Ready(Some(r)), + Poll::Pending => { + *this.polls_left = this.polls_left.saturating_sub(1); + if *this.polls_left == 0 { + Poll::Ready(None) + } else { + Poll::Pending + } + } + } + } +} + async fn connect_raw(s: &str) -> Result<(Client, Connection), Error> { let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let config = s.parse::().unwrap(); @@ -35,6 +67,20 @@ async fn connect(s: &str) -> Client { client } +async fn current_transaction_id(client: &Client) -> i64 { + client + .query("SELECT txid_current()", &[]) + .await + .unwrap() + .pop() + .unwrap() + .get::<_, i64>("txid_current") +} + +async fn in_transaction(client: &Client) -> bool { + current_transaction_id(client).await == current_transaction_id(client).await +} + #[tokio::test] async fn plain_password_missing() { connect_raw("user=pass_user dbname=postgres") @@ -377,6 +423,80 @@ async fn transaction_rollback() { assert_eq!(rows.len(), 0); } +#[tokio::test] +async fn transaction_future_cancellation() { + let mut client = connect("user=postgres").await; + + for i in 0.. { + let done = { + let txn = client.transaction(); + let fut = Cancellable { + fut: txn, + polls_left: i, + }; + fut.await + .map(|res| res.expect("transaction failed")) + .is_some() + }; + + assert!(!in_transaction(&client).await); + + if done { + break; + } + } +} + +#[tokio::test] +async fn transaction_commit_future_cancellation() { + let mut client = connect("user=postgres").await; + + for i in 0.. { + let done = { + let txn = client.transaction().await.unwrap(); + let commit = txn.commit(); + let fut = Cancellable { + fut: commit, + polls_left: i, + }; + fut.await + .map(|res| res.expect("transaction failed")) + .is_some() + }; + + assert!(!in_transaction(&client).await); + + if done { + break; + } + } +} + +#[tokio::test] +async fn transaction_rollback_future_cancellation() { + let mut client = connect("user=postgres").await; + + for i in 0.. 
{ + let done = { + let txn = client.transaction().await.unwrap(); + let rollback = txn.rollback(); + let fut = Cancellable { + fut: rollback, + polls_left: i, + }; + fut.await + .map(|res| res.expect("transaction failed")) + .is_some() + }; + + assert!(!in_transaction(&client).await); + + if done { + break; + } + } +} + #[tokio::test] async fn transaction_rollback_drop() { let mut client = connect("user=postgres").await; From 24b01add826f0844df1eb75f64cac42eda88bcd6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 28 Oct 2021 19:08:38 -0400 Subject: [PATCH 537/819] Don't use a built container for test postgres --- .github/workflows/ci.yml | 6 +----- docker-compose.yml | 8 ++++++-- docker/Dockerfile | 3 --- 3 files changed, 7 insertions(+), 10 deletions(-) delete mode 100644 docker/Dockerfile diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8b3a3420d..e38dea88d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,13 +50,9 @@ jobs: test: name: test runs-on: ubuntu-latest - services: - postgres: - image: sfackler/rust-postgres-test:6 - ports: - - 5433:5433 steps: - uses: actions/checkout@v2 + - run: docker compose up -d - uses: sfackler/actions/rustup@master with: version: 1.51.0 diff --git a/docker-compose.yml b/docker-compose.yml index d44fbe866..0ed44148d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,10 @@ version: '2' services: postgres: - image: "sfackler/rust-postgres-test:6" + image: postgres:14 ports: - - 5433:5433 + - 5433:5433 + volumes: + - ./docker/sql_setup.sh:/docker-entrypoint-initdb.d/sql_setup.sh + environment: + POSTGRES_PASSWORD: postgres diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 1dd7f3db6..000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM postgres:12 - -COPY sql_setup.sh /docker-entrypoint-initdb.d/ From a47a8edf98763846003b51f88fc116704bd7c64a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 28 Oct 2021 19:16:57 -0400 Subject: [PATCH 538/819] Remove src/url.rs from THIRD_PARTY It hasn't existed in the project for quite a while --- THIRD_PARTY | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/THIRD_PARTY b/THIRD_PARTY index 80336ea0f..05e5ac435 100644 --- a/THIRD_PARTY +++ b/THIRD_PARTY @@ -27,33 +27,3 @@ BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - -------------------------------------------------------------------------------- - -* src/url.rs has been copied from Rust - -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. From 33703689e0addf1af4ac34762391020630c2b7be Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 28 Oct 2021 19:22:13 -0400 Subject: [PATCH 539/819] Clean up licenses --- LICENSE | 20 --- LICENSE-APACHE | 201 ++++++++++++++++++++++++++++ LICENSE-MIT | 22 ++++ postgres-derive/LICENSE-APACHE | 202 +---------------------------- postgres-derive/LICENSE-MIT | 23 +--- postgres-native-tls/LICENSE-APACHE | 2 +- postgres-native-tls/LICENSE-MIT | 2 +- postgres-openssl/LICENSE-APACHE | 2 +- postgres-openssl/LICENSE-MIT | 2 +- postgres-protocol/LICENSE-APACHE | 202 +---------------------------- postgres-protocol/LICENSE-MIT | 23 +--- postgres-types/LICENSE-APACHE | 202 +---------------------------- postgres-types/LICENSE-MIT | 23 +--- postgres/LICENSE-APACHE | 202 +---------------------------- postgres/LICENSE-MIT | 23 +--- tokio-postgres/LICENSE-APACHE | 202 +---------------------------- tokio-postgres/LICENSE-MIT | 23 +--- 17 files changed, 237 insertions(+), 1139 deletions(-) delete mode 100644 LICENSE create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT mode change 100644 => 120000 postgres-derive/LICENSE-APACHE mode change 100644 => 120000 postgres-derive/LICENSE-MIT mode change 100644 => 120000 postgres-protocol/LICENSE-APACHE mode change 100644 => 120000 postgres-protocol/LICENSE-MIT mode change 100644 => 120000 postgres-types/LICENSE-APACHE mode change 100644 => 120000 postgres-types/LICENSE-MIT mode change 100644 => 120000 postgres/LICENSE-APACHE mode change 100644 => 120000 postgres/LICENSE-MIT mode change 100644 => 120000 tokio-postgres/LICENSE-APACHE mode change 100644 => 120000 tokio-postgres/LICENSE-MIT diff --git a/LICENSE b/LICENSE deleted file mode 100644 index c7e577c00..000000000 --- a/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013-2017 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 000000000..71803aea1 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Steven Fackler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/postgres-derive/LICENSE-APACHE b/postgres-derive/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/postgres-derive/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/postgres-derive/LICENSE-APACHE b/postgres-derive/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/postgres-derive/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-derive/LICENSE-MIT b/postgres-derive/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/postgres-derive/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/postgres-derive/LICENSE-MIT b/postgres-derive/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/postgres-derive/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-native-tls/LICENSE-APACHE b/postgres-native-tls/LICENSE-APACHE index b9e46b0fc..965b606f3 120000 --- a/postgres-native-tls/LICENSE-APACHE +++ b/postgres-native-tls/LICENSE-APACHE @@ -1 +1 @@ -../tokio-postgres/LICENSE-APACHE \ No newline at end of file +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-native-tls/LICENSE-MIT b/postgres-native-tls/LICENSE-MIT index 162832a42..76219eb72 120000 --- a/postgres-native-tls/LICENSE-MIT +++ b/postgres-native-tls/LICENSE-MIT @@ -1 +1 @@ -../tokio-postgres/LICENSE-MIT \ No newline at end of file +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-openssl/LICENSE-APACHE b/postgres-openssl/LICENSE-APACHE index b9e46b0fc..965b606f3 120000 --- a/postgres-openssl/LICENSE-APACHE +++ b/postgres-openssl/LICENSE-APACHE @@ -1 +1 @@ -../tokio-postgres/LICENSE-APACHE \ No newline at end of file +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-openssl/LICENSE-MIT b/postgres-openssl/LICENSE-MIT index 162832a42..76219eb72 120000 --- a/postgres-openssl/LICENSE-MIT +++ b/postgres-openssl/LICENSE-MIT @@ -1 +1 @@ -../tokio-postgres/LICENSE-MIT \ No newline at end of file +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-protocol/LICENSE-APACHE b/postgres-protocol/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/postgres-protocol/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/postgres-protocol/LICENSE-APACHE b/postgres-protocol/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/postgres-protocol/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-protocol/LICENSE-MIT b/postgres-protocol/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/postgres-protocol/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/postgres-protocol/LICENSE-MIT b/postgres-protocol/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/postgres-protocol/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-types/LICENSE-APACHE b/postgres-types/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/postgres-types/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/postgres-types/LICENSE-APACHE b/postgres-types/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/postgres-types/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-types/LICENSE-MIT b/postgres-types/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/postgres-types/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/postgres-types/LICENSE-MIT b/postgres-types/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/postgres-types/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/postgres/LICENSE-APACHE b/postgres/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/postgres/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/postgres/LICENSE-APACHE b/postgres/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/postgres/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres/LICENSE-MIT b/postgres/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/postgres/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/postgres/LICENSE-MIT b/postgres/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/postgres/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/tokio-postgres/LICENSE-APACHE b/tokio-postgres/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/tokio-postgres/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/tokio-postgres/LICENSE-APACHE b/tokio-postgres/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/tokio-postgres/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/tokio-postgres/LICENSE-MIT b/tokio-postgres/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/tokio-postgres/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/tokio-postgres/LICENSE-MIT b/tokio-postgres/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/tokio-postgres/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file From 8bb5712406c7c3c9763daa553de525cad55785d4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 28 Oct 2021 19:32:33 -0400 Subject: [PATCH 540/819] Implement ToStatement for String Closes #794 --- tokio-postgres/src/to_statement.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tokio-postgres/src/to_statement.rs b/tokio-postgres/src/to_statement.rs index 3ff82493c..427f77dd7 100644 --- a/tokio-postgres/src/to_statement.rs +++ b/tokio-postgres/src/to_statement.rs @@ -47,3 +47,11 @@ impl ToStatement for str { } impl Sealed for str {} + +impl ToStatement for String { + fn __convert(&self) -> ToStatementType<'_> { + ToStatementType::Query(self) + } +} + +impl Sealed for String {} From 84fa5fa1d007e95e18bef95f01e9a065ccf1e415 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 29 Oct 2021 21:33:07 -0400 Subject: [PATCH 541/819] Release tokio-postgres v0.7.5 --- tokio-postgres/CHANGELOG.md | 6 ++++++ tokio-postgres/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 34b4fc1d9..eca196f06 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.7.5 - 2021-10-29 + +### Fixed + +* Fixed a bug where the client could enter into a transaction if the `Client::transaction` future was dropped before completion. + ## v0.7.4 - 2021-10-19 ### Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 17286dc21..5974fe64f 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.4" +version = "0.7.5" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From 6c407d39d5037d01c14f9c830a6f798dd94d2e82 Mon Sep 17 00:00:00 2001 From: Ruslan Talpa Date: Mon, 1 Nov 2021 12:31:59 +0200 Subject: [PATCH 542/819] implement Unknown encoding for query parameters --- postgres-types/src/lib.rs | 42 +++++++++++++++++++++++++++++++++++-- tokio-postgres/src/query.rs | 4 +++- 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 2a953db2f..0c93bed2e 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -769,6 +769,40 @@ pub trait ToSql: fmt::Debug { ty: &Type, out: &mut BytesMut, ) -> Result>; + + /// Specify the encode format + fn encode_format(&self) -> i16 { 1 } + + /// return string representation + fn as_string(&self) -> String { + panic!("as_string not implemented for {:?}", self) + } +} + + +/// A Wrapper type used for sending query parameters encoded as unknown. 
+#[derive(Debug)] +pub struct Unknown<'a>(pub &'a (dyn ToSql + Sync)); + +impl ToSql for Unknown<'_> { + fn to_sql( + &self, + _ty: &Type, + out: &mut BytesMut, + ) -> Result> { + match *self { + Unknown(val) => { + types::text_to_sql(&val.as_string(), out); + Ok(IsNull::No) + } + } + } + + fn accepts(_ty: &Type) -> bool { true } + + fn encode_format(&self) -> i16 { 0 } + + to_sql_checked!(); } impl<'a, T> ToSql for &'a T @@ -905,7 +939,7 @@ impl<'a> ToSql for &'a str { _ => false, } } - + fn as_string(&self) -> String { self.to_string() } to_sql_checked!(); } @@ -929,7 +963,7 @@ impl ToSql for String { fn accepts(ty: &Type) -> bool { <&str as ToSql>::accepts(ty) } - + fn as_string(&self) -> String { self.clone() } to_sql_checked!(); } @@ -944,6 +978,10 @@ macro_rules! simple_to { Ok(IsNull::No) } + fn as_string(&self) -> String { + format!("{}", &self) + } + accepts!($($expected),+); to_sql_checked!(); diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index cdb952190..6de9c5b88 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -156,6 +156,8 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { + + let (param_formats, params):(Vec<_>, Vec<_>) = params.into_iter().map(|p| (p.borrow_to_sql().encode_format(),p)).unzip(); let params = params.into_iter(); assert!( @@ -169,7 +171,7 @@ where let r = frontend::bind( portal, statement.name(), - Some(1), + param_formats, params.zip(statement.params()).enumerate(), |(idx, (param, ty)), buf| match param.borrow_to_sql().to_sql_checked(ty, buf) { Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), From 08b7c65f8d367a701054327918ca653d8a21d66e Mon Sep 17 00:00:00 2001 From: Ruslan Talpa Date: Tue, 2 Nov 2021 11:13:25 +0200 Subject: [PATCH 543/819] remove as_string --- postgres-types/src/lib.rs | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 0c93bed2e..02fb48ed1 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -772,17 +772,12 @@ pub trait ToSql: fmt::Debug { /// Specify the encode format fn encode_format(&self) -> i16 { 1 } - - /// return string representation - fn as_string(&self) -> String { - panic!("as_string not implemented for {:?}", self) - } } /// A Wrapper type used for sending query parameters encoded as unknown. #[derive(Debug)] -pub struct Unknown<'a>(pub &'a (dyn ToSql + Sync)); +pub struct Unknown<'a>(pub &'a str); impl ToSql for Unknown<'_> { fn to_sql( @@ -792,7 +787,7 @@ impl ToSql for Unknown<'_> { ) -> Result> { match *self { Unknown(val) => { - types::text_to_sql(&val.as_string(), out); + types::text_to_sql(val, out); Ok(IsNull::No) } } @@ -939,7 +934,7 @@ impl<'a> ToSql for &'a str { _ => false, } } - fn as_string(&self) -> String { self.to_string() } + to_sql_checked!(); } @@ -963,7 +958,7 @@ impl ToSql for String { fn accepts(ty: &Type) -> bool { <&str as ToSql>::accepts(ty) } - fn as_string(&self) -> String { self.clone() } + to_sql_checked!(); } @@ -978,10 +973,6 @@ macro_rules! 
simple_to { Ok(IsNull::No) } - fn as_string(&self) -> String { - format!("{}", &self) - } - accepts!($($expected),+); to_sql_checked!(); From 683868850eda0f02a0c8f2776eb20d627c8581e3 Mon Sep 17 00:00:00 2001 From: Ruslan Talpa Date: Fri, 19 Nov 2021 09:22:36 +0200 Subject: [PATCH 544/819] remove "Unknown" and add Format enum --- postgres-types/src/lib.rs | 39 ++++++++++++++++--------------------- tokio-postgres/src/query.rs | 3 +-- 2 files changed, 18 insertions(+), 24 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 02fb48ed1..0121cbb6e 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -771,35 +771,30 @@ pub trait ToSql: fmt::Debug { ) -> Result>; /// Specify the encode format - fn encode_format(&self) -> i16 { 1 } + fn encode_format(&self) -> Format { Format::Binary } } +/// Supported Postgres message format types +/// +/// Using Text format in a message assumes a Postgres `SERVER_ENCODING` of `UTF8` +pub enum Format { + /// Text format (UTF-8) + Text, + /// Compact, typed binary format + Binary, +} -/// A Wrapper type used for sending query parameters encoded as unknown. -#[derive(Debug)] -pub struct Unknown<'a>(pub &'a str); - -impl ToSql for Unknown<'_> { - fn to_sql( - &self, - _ty: &Type, - out: &mut BytesMut, - ) -> Result> { - match *self { - Unknown(val) => { - types::text_to_sql(val, out); - Ok(IsNull::No) - } +/// Convert from `Format` to the Postgres integer representation of those formats +impl From for i16 { + fn from(format: Format) -> Self { + match format { + Format::Text => 0, + Format::Binary => 1, } } - - fn accepts(_ty: &Type) -> bool { true } - - fn encode_format(&self) -> i16 { 0 } - - to_sql_checked!(); } + impl<'a, T> ToSql for &'a T where T: ToSql, diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 6de9c5b88..af57e2aec 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -156,8 +156,7 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - - let (param_formats, params):(Vec<_>, Vec<_>) = params.into_iter().map(|p| (p.borrow_to_sql().encode_format(),p)).unzip(); + let (param_formats, params):(Vec<_>, Vec<_>) = params.into_iter().map(|p|->(i16, P){(p.borrow_to_sql().encode_format().into(),p)}).unzip(); let params = params.into_iter(); assert!( From dc591ff2ca5a51e4f7f3543e1321292b5a1dadea Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 23 Nov 2021 23:40:00 -0500 Subject: [PATCH 545/819] Fix handling of raw ident fields in derive --- postgres-derive-test/src/composites.rs | 23 +++++++++++++++++++++++ postgres-derive/src/composites.rs | 8 +++++++- postgres-derive/src/fromsql.rs | 6 +++--- 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/postgres-derive-test/src/composites.rs b/postgres-derive-test/src/composites.rs index 5efd3944c..ed60bf48f 100644 --- a/postgres-derive-test/src/composites.rs +++ b/postgres-derive-test/src/composites.rs @@ -215,3 +215,26 @@ fn wrong_type() { .unwrap_err(); assert!(err.source().unwrap().is::()); } + +#[test] +fn raw_ident_field() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "inventory_item")] + struct InventoryItem { + r#type: String, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + type TEXT + )", + ) + .unwrap(); + + let item = InventoryItem { + r#type: "foo".to_owned(), + }; + + test_type(&mut conn, "inventory_item", &[(item, "ROW('foo')")]); +} diff --git 
a/postgres-derive/src/composites.rs b/postgres-derive/src/composites.rs index f5599d375..c1e495154 100644 --- a/postgres-derive/src/composites.rs +++ b/postgres-derive/src/composites.rs @@ -14,7 +14,13 @@ impl Field { let ident = raw.ident.as_ref().unwrap().clone(); Ok(Field { - name: overrides.name.unwrap_or_else(|| ident.to_string()), + name: overrides.name.unwrap_or_else(|| { + let name = ident.to_string(); + match name.strip_prefix("r#") { + Some(name) => name.to_string(), + None => name, + } + }), ident, type_: raw.ty.clone(), }) diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index e1ab6ffa7..3a59d6226 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -1,5 +1,5 @@ -use proc_macro2::{Span, TokenStream}; -use quote::quote; +use proc_macro2::TokenStream; +use quote::{format_ident, quote}; use std::iter; use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; @@ -119,7 +119,7 @@ fn domain_body(ident: &Ident, field: &syn::Field) -> TokenStream { fn composite_body(ident: &Ident, fields: &[Field]) -> TokenStream { let temp_vars = &fields .iter() - .map(|f| Ident::new(&format!("__{}", f.ident), Span::call_site())) + .map(|f| format_ident!("__{}", f.ident)) .collect::>(); let field_names = &fields.iter().map(|f| &f.name).collect::>(); let field_idents = &fields.iter().map(|f| &f.ident).collect::>(); From c5591c810ccba977bbf819b5d290d30e26300a3f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 23 Nov 2021 23:50:27 -0500 Subject: [PATCH 546/819] Release postgres-derive v0.4.1 --- postgres-derive/CHANGELOG.md | 6 ++++++ postgres-derive/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md index 354f6f277..9bb3a752f 100644 --- a/postgres-derive/CHANGELOG.md +++ b/postgres-derive/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.4.1 - 2021-11-23 + +### Fixed + +* Fixed handling of struct fields using raw identifiers. + ## v0.4.0 - 2019-12-23 No changes diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 293c294a0..1ce243a58 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-derive" -version = "0.4.0" +version = "0.4.1" authors = ["Steven Fackler "] license = "MIT/Apache-2.0" edition = "2018" From 8ead6e6c69e049c6b7ca67432e781a7429f76056 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 8 Dec 2021 18:30:44 -0500 Subject: [PATCH 547/819] Update hash crates --- postgres-protocol/Cargo.toml | 6 +++--- postgres-protocol/src/authentication/sasl.rs | 4 ++-- postgres-protocol/src/password/mod.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index a4ed3e907..638778f22 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -13,9 +13,9 @@ base64 = "0.13" byteorder = "1.0" bytes = "1.0" fallible-iterator = "0.2" -hmac = "0.11" -md-5 = "0.9" +hmac = "0.12" +md-5 = "0.10" memchr = "2.0" rand = "0.8" -sha2 = "0.9" +sha2 = "0.10" stringprep = "0.1" diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index a3704ce16..ea2f55cad 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -1,6 +1,6 @@ //! SASL-based authentication support. 
-use hmac::{Hmac, Mac, NewMac}; +use hmac::{Hmac, Mac}; use rand::{self, Rng}; use sha2::digest::FixedOutput; use sha2::{Digest, Sha256}; @@ -275,7 +275,7 @@ impl ScramSha256 { let mut hmac = Hmac::::new_from_slice(&server_key) .expect("HMAC is able to accept all key sizes"); hmac.update(auth_message.as_bytes()); - hmac.verify(&verifier) + hmac.verify_slice(&verifier) .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "SCRAM verification error")) } } diff --git a/postgres-protocol/src/password/mod.rs b/postgres-protocol/src/password/mod.rs index 1b32ae8f8..a60687bbe 100644 --- a/postgres-protocol/src/password/mod.rs +++ b/postgres-protocol/src/password/mod.rs @@ -7,7 +7,7 @@ //! end up in logs pg_stat displays, etc. use crate::authentication::sasl; -use hmac::{Hmac, Mac, NewMac}; +use hmac::{Hmac, Mac}; use md5::Md5; use rand::RngCore; use sha2::digest::FixedOutput; From 76cd380e5a70d76f4f73219385e906ea3d6be7f9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 8 Dec 2021 18:35:18 -0500 Subject: [PATCH 548/819] clippy --- tokio-postgres/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index eb4e5bdc5..c026cca4f 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -780,7 +780,7 @@ impl<'a> UrlParser<'a> { } fn take_all(&mut self) -> &'a str { - mem::replace(&mut self.s, "") + mem::take(&mut self.s) } fn eat_byte(&mut self) { From 630f179892c9030119bf80df97aa05fef2dea525 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 10 Dec 2021 19:21:59 -0500 Subject: [PATCH 549/819] Release postgres-protocol v0.6.3 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index eb37f5883..5d9cecd01 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.6.3 - 2021-12-10 + +### Changed + +* Upgraded `hmac`, `md-5` and `sha`. 
+
 ## v0.6.2 - 2021-09-29
 
 ### Changed
diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml
index 638778f22..2010e88ad 100644
--- a/postgres-protocol/Cargo.toml
+++ b/postgres-protocol/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "postgres-protocol"
-version = "0.6.2"
+version = "0.6.3"
 authors = ["Steven Fackler "]
 edition = "2018"
 description = "Low level Postgres protocol APIs"

From c516805275aaaf106e8e512f53a9b0234f707583 Mon Sep 17 00:00:00 2001
From: Lachezar Lechev
Date: Thu, 16 Dec 2021 10:21:22 +0200
Subject: [PATCH 550/819] impl BorrowToSql for: - Box<dyn ToSql + Sync> - Box<dyn ToSql + Sync + Send>

---
 postgres-types/src/lib.rs | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs
index 2a953db2f..010b06adc 100644
--- a/postgres-types/src/lib.rs
+++ b/postgres-types/src/lib.rs
@@ -1044,6 +1044,23 @@ impl BorrowToSql for &dyn ToSql {
     }
 }
 
+impl sealed::Sealed for Box<dyn ToSql + Sync> {}
+
+impl BorrowToSql for Box<dyn ToSql + Sync> {
+    #[inline]
+    fn borrow_to_sql(&self) -> &dyn ToSql {
+        self.as_ref()
+    }
+}
+
+impl sealed::Sealed for Box<dyn ToSql + Sync + Send> {}
+impl BorrowToSql for Box<dyn ToSql + Sync + Send> {
+    #[inline]
+    fn borrow_to_sql(&self) -> &dyn ToSql {
+        self.as_ref()
+    }
+}
+
 impl sealed::Sealed for &(dyn ToSql + Sync) {}
 
 /// In async contexts it is sometimes necessary to have the additional

From 35f4c0aeefef139e682ebead2db47397be513e6c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ma=C3=ABl=20Obr=C3=A9jan?=
Date: Sun, 26 Dec 2021 19:13:03 +0100
Subject: [PATCH 551/819] Implement `ToSql` & `FromSql` for `Box<str>`

---
 postgres-types/src/lib.rs | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs
index 2a953db2f..84354cf3b 100644
--- a/postgres-types/src/lib.rs
+++ b/postgres-types/src/lib.rs
@@ -584,6 +584,18 @@ impl<'a> FromSql<'a> for String {
     }
 }
 
+impl<'a> FromSql<'a> for Box<str> {
+    fn from_sql(_: &Type, raw: &'a [u8]) -> Result<Box<str>, Box<dyn Error + Sync + Send>> {
+        types::text_from_sql(raw)
+            .map(ToString::to_string)
+            .map(String::into_boxed_str)
+    }
+
+    fn accepts(ty: &Type) -> bool {
+        <&str as FromSql>::accepts(ty)
+    }
+}
+
 impl<'a> FromSql<'a> for &'a str {
     fn from_sql(_: &Type, raw: &'a [u8]) -> Result<&'a str, Box<dyn Error + Sync + Send>> {
         types::text_from_sql(raw)
@@ -933,6 +945,18 @@ impl ToSql for Vec<u8> {
     }
 }
 
+impl ToSql for Box<str> {
+    fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result<IsNull, Box<dyn Error + Sync + Send>> {
+        <&str as ToSql>::to_sql(&&**self, ty, w)
+    }
+
+    fn accepts(ty: &Type) -> bool {
+        <&str as ToSql>::accepts(ty)
+    }
+
+    to_sql_checked!();
+}
+
 macro_rules!
simple_to { ($t:ty, $f:ident, $($expected:ident),+) => { impl ToSql for $t { From 4561d44661d0367c5f7792eaf8086351b7eb673f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ABl=20Obr=C3=A9jan?= Date: Sun, 26 Dec 2021 20:43:14 +0100 Subject: [PATCH 552/819] Add `#[postgres(transparent)]` --- .../src/compile-fail/invalid-transparent.rs | 35 ++++++ .../compile-fail/invalid-transparent.stderr | 49 ++++++++ postgres-derive-test/src/lib.rs | 1 + postgres-derive-test/src/transparent.rs | 18 +++ postgres-derive/src/accepts.rs | 8 ++ postgres-derive/src/fromsql.rs | 37 +++++- postgres-derive/src/lib.rs | 7 +- postgres-derive/src/overrides.rs | 20 +++- postgres-derive/src/tosql.rs | 107 ++++++++++++------ postgres-types/src/lib.rs | 15 +++ 10 files changed, 251 insertions(+), 46 deletions(-) create mode 100644 postgres-derive-test/src/compile-fail/invalid-transparent.rs create mode 100644 postgres-derive-test/src/compile-fail/invalid-transparent.stderr create mode 100644 postgres-derive-test/src/transparent.rs diff --git a/postgres-derive-test/src/compile-fail/invalid-transparent.rs b/postgres-derive-test/src/compile-fail/invalid-transparent.rs new file mode 100644 index 000000000..43bd48266 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-transparent.rs @@ -0,0 +1,35 @@ +use postgres_types::{FromSql, ToSql}; + +#[derive(ToSql, Debug)] +#[postgres(transparent)] +struct ToSqlTransparentStruct { + a: i32 +} + +#[derive(FromSql, Debug)] +#[postgres(transparent)] +struct FromSqlTransparentStruct { + a: i32 +} + +#[derive(ToSql, Debug)] +#[postgres(transparent)] +enum ToSqlTransparentEnum { + Foo +} + +#[derive(FromSql, Debug)] +#[postgres(transparent)] +enum FromSqlTransparentEnum { + Foo +} + +#[derive(ToSql, Debug)] +#[postgres(transparent)] +struct ToSqlTransparentTwoFieldTupleStruct(i32, i32); + +#[derive(FromSql, Debug)] +#[postgres(transparent)] +struct FromSqlTransparentTwoFieldTupleStruct(i32, i32); + +fn main() {} diff --git a/postgres-derive-test/src/compile-fail/invalid-transparent.stderr b/postgres-derive-test/src/compile-fail/invalid-transparent.stderr new file mode 100644 index 000000000..42e49f874 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-transparent.stderr @@ -0,0 +1,49 @@ +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:4:1 + | +4 | / #[postgres(transparent)] +5 | | struct ToSqlTransparentStruct { +6 | | a: i32 +7 | | } + | |_^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:10:1 + | +10 | / #[postgres(transparent)] +11 | | struct FromSqlTransparentStruct { +12 | | a: i32 +13 | | } + | |_^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:16:1 + | +16 | / #[postgres(transparent)] +17 | | enum ToSqlTransparentEnum { +18 | | Foo +19 | | } + | |_^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:22:1 + | +22 | / #[postgres(transparent)] +23 | | enum FromSqlTransparentEnum { +24 | | Foo +25 | | } + | |_^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:28:1 + | +28 | / #[postgres(transparent)] +29 | | struct ToSqlTransparentTwoFieldTupleStruct(i32, i32); + | |_____________________________________________________^ + +error: #[postgres(transparent)] may 
only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:32:1 + | +32 | / #[postgres(transparent)] +33 | | struct FromSqlTransparentTwoFieldTupleStruct(i32, i32); + | |_______________________________________________________^ diff --git a/postgres-derive-test/src/lib.rs b/postgres-derive-test/src/lib.rs index 7da75af8f..279ed1419 100644 --- a/postgres-derive-test/src/lib.rs +++ b/postgres-derive-test/src/lib.rs @@ -7,6 +7,7 @@ use std::fmt; mod composites; mod domains; mod enums; +mod transparent; pub fn test_type(conn: &mut Client, sql_type: &str, checks: &[(T, S)]) where diff --git a/postgres-derive-test/src/transparent.rs b/postgres-derive-test/src/transparent.rs new file mode 100644 index 000000000..1614553d2 --- /dev/null +++ b/postgres-derive-test/src/transparent.rs @@ -0,0 +1,18 @@ +use postgres::{Client, NoTls}; +use postgres_types::{FromSql, ToSql}; + +#[test] +fn round_trip() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(transparent)] + struct UserId(i32); + + assert_eq!( + Client::connect("user=postgres host=localhost port=5433", NoTls) + .unwrap() + .query_one("SELECT $1::integer", &[&UserId(123)]) + .unwrap() + .get::<_, UserId>(0), + UserId(123) + ); +} diff --git a/postgres-derive/src/accepts.rs b/postgres-derive/src/accepts.rs index 530badd0b..63473863a 100644 --- a/postgres-derive/src/accepts.rs +++ b/postgres-derive/src/accepts.rs @@ -6,6 +6,14 @@ use syn::Ident; use crate::composites::Field; use crate::enums::Variant; +pub fn transparent_body(field: &syn::Field) -> TokenStream { + let ty = &field.ty; + + quote! { + <#ty as ::postgres_types::ToSql>::accepts(type_) + } +} + pub fn domain_body(name: &str, field: &syn::Field) -> TokenStream { let ty = &field.ty; diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index 3a59d6226..c89cbb5e2 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -11,9 +11,36 @@ use crate::overrides::Overrides; pub fn expand_derive_fromsql(input: DeriveInput) -> Result { let overrides = Overrides::extract(&input.attrs)?; + if overrides.name.is_some() && overrides.transparent { + return Err(Error::new_spanned( + &input, + "#[postgres(transparent)] is not allowed with #[postgres(name = \"...\")]", + )); + } + let name = overrides.name.unwrap_or_else(|| input.ident.to_string()); - let (accepts_body, to_sql_body) = match input.data { + let (accepts_body, to_sql_body) = if overrides.transparent { + match input.data { + Data::Struct(DataStruct { + fields: Fields::Unnamed(ref fields), + .. + }) if fields.unnamed.len() == 1 => { + let field = fields.unnamed.first().unwrap(); + ( + accepts::transparent_body(field), + transparent_body(&input.ident, field), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[postgres(transparent)] may only be applied to single field tuple structs", + )) + } + } + } else { + match input.data { Data::Enum(ref data) => { let variants = data .variants @@ -55,6 +82,7 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { "#[derive(FromSql)] may only be applied to structs, single field tuple structs, and enums", )) } + } }; let ident = &input.ident; @@ -77,6 +105,13 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { Ok(out) } +fn transparent_body(ident: &Ident, field: &syn::Field) -> TokenStream { + let ty = &field.ty; + quote! 
{ + <#ty as postgres_types::FromSql>::from_sql(_type, buf).map(#ident) + } +} + fn enum_body(ident: &Ident, variants: &[Variant]) -> TokenStream { let variant_names = variants.iter().map(|v| &v.name); let idents = iter::repeat(ident); diff --git a/postgres-derive/src/lib.rs b/postgres-derive/src/lib.rs index fd17b9de6..98e6add24 100644 --- a/postgres-derive/src/lib.rs +++ b/postgres-derive/src/lib.rs @@ -4,6 +4,7 @@ extern crate proc_macro; use proc_macro::TokenStream; +use syn::parse_macro_input; mod accepts; mod composites; @@ -14,7 +15,8 @@ mod tosql; #[proc_macro_derive(ToSql, attributes(postgres))] pub fn derive_tosql(input: TokenStream) -> TokenStream { - let input = syn::parse(input).unwrap(); + let input = parse_macro_input!(input); + tosql::expand_derive_tosql(input) .unwrap_or_else(|e| e.to_compile_error()) .into() @@ -22,7 +24,8 @@ pub fn derive_tosql(input: TokenStream) -> TokenStream { #[proc_macro_derive(FromSql, attributes(postgres))] pub fn derive_fromsql(input: TokenStream) -> TokenStream { - let input = syn::parse(input).unwrap(); + let input = parse_macro_input!(input); + fromsql::expand_derive_fromsql(input) .unwrap_or_else(|e| e.to_compile_error()) .into() diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs index 08e6f3a77..c00d5a94b 100644 --- a/postgres-derive/src/overrides.rs +++ b/postgres-derive/src/overrides.rs @@ -2,17 +2,18 @@ use syn::{Attribute, Error, Lit, Meta, NestedMeta}; pub struct Overrides { pub name: Option, + pub transparent: bool, } impl Overrides { pub fn extract(attrs: &[Attribute]) -> Result { - let mut overrides = Overrides { name: None }; + let mut overrides = Overrides { + name: None, + transparent: false, + }; for attr in attrs { - let attr = match attr.parse_meta() { - Ok(meta) => meta, - Err(_) => continue, - }; + let attr = attr.parse_meta()?; if !attr.path().is_ident("postgres") { continue; @@ -39,7 +40,14 @@ impl Overrides { overrides.name = Some(value); } - bad => return Err(Error::new_spanned(bad, "expected a name-value meta item")), + NestedMeta::Meta(Meta::Path(ref path)) => { + if !path.is_ident("transparent") { + return Err(Error::new_spanned(path, "unknown override")); + } + + overrides.transparent = true; + } + bad => return Err(Error::new_spanned(bad, "unknown attribute")), } } } diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index 1808e787d..96f261385 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -11,46 +11,73 @@ use crate::overrides::Overrides; pub fn expand_derive_tosql(input: DeriveInput) -> Result { let overrides = Overrides::extract(&input.attrs)?; + if overrides.name.is_some() && overrides.transparent { + return Err(Error::new_spanned( + &input, + "#[postgres(transparent)] is not allowed with #[postgres(name = \"...\")]", + )); + } + let name = overrides.name.unwrap_or_else(|| input.ident.to_string()); - let (accepts_body, to_sql_body) = match input.data { - Data::Enum(ref data) => { - let variants = data - .variants - .iter() - .map(Variant::parse) - .collect::, _>>()?; - ( - accepts::enum_body(&name, &variants), - enum_body(&input.ident, &variants), - ) - } - Data::Struct(DataStruct { - fields: Fields::Unnamed(ref fields), - .. - }) if fields.unnamed.len() == 1 => { - let field = fields.unnamed.first().unwrap(); - (accepts::domain_body(&name, field), domain_body()) - } - Data::Struct(DataStruct { - fields: Fields::Named(ref fields), - .. 
- }) => { - let fields = fields - .named - .iter() - .map(Field::parse) - .collect::, _>>()?; - ( - accepts::composite_body(&name, "ToSql", &fields), - composite_body(&fields), - ) + let (accepts_body, to_sql_body) = if overrides.transparent { + match input.data { + Data::Struct(DataStruct { + fields: Fields::Unnamed(ref fields), + .. + }) if fields.unnamed.len() == 1 => { + let field = fields.unnamed.first().unwrap(); + + (accepts::transparent_body(field), transparent_body()) + } + _ => { + return Err(Error::new_spanned( + input, + "#[postgres(transparent)] may only be applied to single field tuple structs", + )); + } } - _ => { - return Err(Error::new_spanned( - input, - "#[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums", - )); + } else { + match input.data { + Data::Enum(ref data) => { + let variants = data + .variants + .iter() + .map(Variant::parse) + .collect::, _>>()?; + ( + accepts::enum_body(&name, &variants), + enum_body(&input.ident, &variants), + ) + } + Data::Struct(DataStruct { + fields: Fields::Unnamed(ref fields), + .. + }) if fields.unnamed.len() == 1 => { + let field = fields.unnamed.first().unwrap(); + + (accepts::domain_body(&name, field), domain_body()) + } + Data::Struct(DataStruct { + fields: Fields::Named(ref fields), + .. + }) => { + let fields = fields + .named + .iter() + .map(Field::parse) + .collect::, _>>()?; + ( + accepts::composite_body(&name, "ToSql", &fields), + composite_body(&fields), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums", + )); + } } }; @@ -78,6 +105,12 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { Ok(out) } +fn transparent_body() -> TokenStream { + quote! { + postgres_types::ToSql::to_sql(&self.0, _type, buf) + } +} + fn enum_body(ident: &Ident, variants: &[Variant]) -> TokenStream { let idents = iter::repeat(ident); let variant_idents = variants.iter().map(|v| &v.ident); diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 2a953db2f..e409051e8 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -55,6 +55,21 @@ //! struct SessionId(Vec); //! ``` //! +//! ## Newtypes +//! +//! The `#[postgres(transparent)]` attribute can be used on a single-field tuple struct to create a +//! Rust-only wrapper type that will use the [`ToSql`] & [`FromSql`] implementation of the inner +//! value : +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! #[postgres(transparent)] +//! struct UserId(i32); +//! ``` +//! //! ## Composites //! //! Postgres composite types correspond to structs in Rust: From 842e5cfdcb2c3f4fec3d394ccd1e8b91e2e8985b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jan 2022 13:15:32 +0000 Subject: [PATCH 553/819] Update parking_lot requirement from 0.11 to 0.12 Updates the requirements on [parking_lot](https://github.com/Amanieu/parking_lot) to permit the latest version. - [Release notes](https://github.com/Amanieu/parking_lot/releases) - [Changelog](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md) - [Commits](https://github.com/Amanieu/parking_lot/compare/0.11.0...0.12.0) --- updated-dependencies: - dependency-name: parking_lot dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 5974fe64f..7d898e269 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -46,7 +46,7 @@ byteorder = "1.0" fallible-iterator = "0.2" futures = "0.3" log = "0.4" -parking_lot = "0.11" +parking_lot = "0.12" percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.10" From a07a39cc875b95565908125d66c1dfb6682d406a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 31 Jan 2022 09:11:29 -0500 Subject: [PATCH 554/819] Update ci.yml --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e38dea88d..520d665f8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.51.0 + version: 1.53.0 - run: echo "::set-output name=version::$(rustc --version)" id: rust-version - uses: actions/cache@v1 From 7fd748ba96d3056a1a2315799661d7e9e849deb7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Feb 2022 13:47:41 +0000 Subject: [PATCH 555/819] Update tokio-util requirement from 0.6 to 0.7 Updates the requirements on [tokio-util](https://github.com/tokio-rs/tokio) to permit the latest version. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/commits) --- updated-dependencies: - dependency-name: tokio-util dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 7d898e269..94371af51 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -54,7 +54,7 @@ postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } postgres-types = { version = "0.2.2", path = "../postgres-types" } socket2 = "0.4" tokio = { version = "1.0", features = ["io-util"] } -tokio-util = { version = "0.6", features = ["codec"] } +tokio-util = { version = "0.7", features = ["codec"] } [dev-dependencies] tokio = { version = "1.0", features = ["full"] } From 9685f9c532f10bd99339e2dcddaad3b462c5b687 Mon Sep 17 00:00:00 2001 From: Tim Anderson Date: Wed, 16 Mar 2022 14:13:36 +1000 Subject: [PATCH 556/819] Add ToSql / FromSql for IpInet and IpCidr from cidr crate --- postgres-types/Cargo.toml | 2 ++ postgres-types/src/cidr_02.rs | 44 +++++++++++++++++++++++++++++++++++ postgres-types/src/lib.rs | 2 ++ 3 files changed, 48 insertions(+) create mode 100644 postgres-types/src/cidr_02.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 7eca3fbcf..1954d51bb 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -14,6 +14,7 @@ categories = ["database"] derive = ["postgres-derive"] array-impls = ["array-init"] with-bit-vec-0_6 = ["bit-vec-06"] +with-cidr-0_2 = ["cidr-02"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] with-eui48-1 = ["eui48-1"] @@ -32,6 +33,7 @@ postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-deri array-init = { version = "2", optional = true } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } +cidr-02 = { version = "0.2", package = "cidr", optional = true } chrono-04 = { version = "0.4.16", 
package = "chrono", default-features = false, features = ["clock"], optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } eui48-1 = { version = "1.0", package = "eui48", optional = true } diff --git a/postgres-types/src/cidr_02.rs b/postgres-types/src/cidr_02.rs new file mode 100644 index 000000000..46e904483 --- /dev/null +++ b/postgres-types/src/cidr_02.rs @@ -0,0 +1,44 @@ +use bytes::BytesMut; +use cidr_02::{IpCidr, IpInet}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for IpCidr { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(IpCidr::new(inet.addr(), inet.netmask()).expect("postgres cidr type has zeroed host portion")) + } + + accepts!(CIDR); +} + +impl ToSql for IpCidr { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::inet_to_sql(self.first_address(), self.network_length(), w); + Ok(IsNull::No) + } + + accepts!(CIDR); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for IpInet { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(IpInet::new(inet.addr(), inet.netmask()).expect("postgres enforces maximum length of netmask")) + } + + accepts!(INET); +} + +impl ToSql for IpInet { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::inet_to_sql(self.address(), self.network_length(), w); + Ok(IsNull::No) + } + + accepts!(INET); + to_sql_checked!(); +} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 0247b90b7..b1a45bab1 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -210,6 +210,8 @@ where #[cfg(feature = "with-bit-vec-0_6")] mod bit_vec_06; +#[cfg(feature = "with-cidr-0_2")] +mod cidr_02; #[cfg(feature = "with-chrono-0_4")] mod chrono_04; #[cfg(feature = "with-eui48-0_4")] From dd7bc073f7a7dfd1a1dd9c3e90e5d9d1630a2824 Mon Sep 17 00:00:00 2001 From: Tim Anderson Date: Wed, 16 Mar 2022 14:32:50 +1000 Subject: [PATCH 557/819] Document cidr type conversion and run rustfmt --- postgres-types/Cargo.toml | 2 +- postgres-types/src/cidr_02.rs | 6 ++++-- postgres-types/src/lib.rs | 6 ++++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 1954d51bb..9d470f37b 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -33,8 +33,8 @@ postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-deri array-init = { version = "2", optional = true } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } -cidr-02 = { version = "0.2", package = "cidr", optional = true } chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = ["clock"], optional = true } +cidr-02 = { version = "0.2", package = "cidr", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } eui48-1 = { version = "1.0", package = "eui48", optional = true } geo-types-06 = { version = "0.6", package = "geo-types", optional = true } diff --git a/postgres-types/src/cidr_02.rs b/postgres-types/src/cidr_02.rs index 46e904483..d4e4965c5 100644 --- a/postgres-types/src/cidr_02.rs +++ b/postgres-types/src/cidr_02.rs @@ -8,7 +8,8 @@ use crate::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for IpCidr { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let inet = types::inet_from_sql(raw)?; - Ok(IpCidr::new(inet.addr(), inet.netmask()).expect("postgres cidr type has zeroed host 
portion")) + Ok(IpCidr::new(inet.addr(), inet.netmask()) + .expect("postgres cidr type has zeroed host portion")) } accepts!(CIDR); @@ -27,7 +28,8 @@ impl ToSql for IpCidr { impl<'a> FromSql<'a> for IpInet { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let inet = types::inet_from_sql(raw)?; - Ok(IpInet::new(inet.addr(), inet.netmask()).expect("postgres enforces maximum length of netmask")) + Ok(IpInet::new(inet.addr(), inet.netmask()) + .expect("postgres enforces maximum length of netmask")) } accepts!(INET); diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index b1a45bab1..394f938ff 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -210,10 +210,10 @@ where #[cfg(feature = "with-bit-vec-0_6")] mod bit_vec_06; -#[cfg(feature = "with-cidr-0_2")] -mod cidr_02; #[cfg(feature = "with-chrono-0_4")] mod chrono_04; +#[cfg(feature = "with-cidr-0_2")] +mod cidr_02; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; #[cfg(feature = "with-eui48-1")] @@ -438,6 +438,8 @@ impl WrongType { /// | `uuid::Uuid` | UUID | /// | `bit_vec::BitVec` | BIT, VARBIT | /// | `eui48::MacAddress` | MACADDR | +/// | `cidr::InetCidr` | CIDR | +/// | `cidr::InetAddr` | INET | /// /// # Nullability /// From 27039f6c3a9f05a41b657f1db5489d055363e2a8 Mon Sep 17 00:00:00 2001 From: Tim Anderson Date: Thu, 17 Mar 2022 09:31:13 +1000 Subject: [PATCH 558/819] Change error handling in `cidr` `FromSql` implementations --- postgres-types/src/cidr_02.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/postgres-types/src/cidr_02.rs b/postgres-types/src/cidr_02.rs index d4e4965c5..2de952c3c 100644 --- a/postgres-types/src/cidr_02.rs +++ b/postgres-types/src/cidr_02.rs @@ -8,8 +8,7 @@ use crate::{FromSql, IsNull, ToSql, Type}; impl<'a> FromSql<'a> for IpCidr { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let inet = types::inet_from_sql(raw)?; - Ok(IpCidr::new(inet.addr(), inet.netmask()) - .expect("postgres cidr type has zeroed host portion")) + Ok(IpCidr::new(inet.addr(), inet.netmask())?) } accepts!(CIDR); @@ -28,8 +27,7 @@ impl ToSql for IpCidr { impl<'a> FromSql<'a> for IpInet { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let inet = types::inet_from_sql(raw)?; - Ok(IpInet::new(inet.addr(), inet.netmask()) - .expect("postgres enforces maximum length of netmask")) + Ok(IpInet::new(inet.addr(), inet.netmask())?) 
} accepts!(INET); From 944b72974f751ecd6ac72447af753cec7b88320e Mon Sep 17 00:00:00 2001 From: Matt Oliver Date: Thu, 3 Mar 2022 00:06:46 -0600 Subject: [PATCH 559/819] Add ltree, lquery and ltxtquery support --- postgres-protocol/Cargo.toml | 2 +- postgres-protocol/src/types/mod.rs | 16 ++++++ postgres-types/Cargo.toml | 4 +- postgres-types/src/lib.rs | 44 +++++++++++---- tokio-postgres/Cargo.toml | 6 +-- tokio-postgres/tests/test/types/mod.rs | 75 ++++++++++++++++++++++++++ 6 files changed, 131 insertions(+), 16 deletions(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 2010e88ad..a4716907b 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.6.3" +version = "0.6.4" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index a595f5a30..5939d9f00 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -1059,3 +1059,19 @@ impl Inet { self.netmask } } + +/// Serializes a Postgres l{tree,query,txtquery} string +#[inline] +pub fn ltree_to_sql(v: &str, buf: &mut BytesMut) { + // A version number is prepended to an Ltree string per spec + buf.put_u8(1); + // Append the rest of the query + buf.put_slice(v.as_bytes()); +} + +/// Deserialize a Postgres l{tree,query,txtquery} string +#[inline] +pub fn ltree_from_sql(buf: &[u8]) -> Result<&str, StdBox> { + // Remove the version number from the front of the string per spec + Ok(str::from_utf8(&buf[1..])?) +} diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 9d470f37b..000d71ea0 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.2.2" +version = "0.2.3" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -28,7 +28,7 @@ with-time-0_3 = ["time-03"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } array-init = { version = "2", optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 394f938ff..bf7a1caee 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -594,8 +594,8 @@ impl<'a> FromSql<'a> for &'a [u8] { } impl<'a> FromSql<'a> for String { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { - types::text_from_sql(raw).map(ToString::to_string) + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + <&str as FromSql>::from_sql(ty, raw).map(ToString::to_string) } fn accepts(ty: &Type) -> bool { @@ -604,8 +604,8 @@ impl<'a> FromSql<'a> for String { } impl<'a> FromSql<'a> for Box { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result, Box> { - types::text_from_sql(raw) + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result, Box> { + <&str as FromSql>::from_sql(ty, raw) .map(ToString::to_string) .map(String::into_boxed_str) } @@ -616,14 +616,26 @@ impl<'a> FromSql<'a> for Box { } impl<'a> FromSql<'a> for &'a str { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { - types::text_from_sql(raw) + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { + match *ty { + ref ty if ( + ty.name() == "ltree" || + ty.name() == "lquery" || + ty.name() == 
"ltxtquery" + ) => types::ltree_from_sql(raw), + _ => types::text_from_sql(raw) + } } fn accepts(ty: &Type) -> bool { match *ty { Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true, - ref ty if ty.name() == "citext" => true, + ref ty if ( + ty.name() == "citext" || + ty.name() == "ltree" || + ty.name() == "lquery" || + ty.name() == "ltxtquery" + ) => true, _ => false, } } @@ -924,15 +936,27 @@ impl ToSql for Vec { } impl<'a> ToSql for &'a str { - fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - types::text_to_sql(*self, w); + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + match ty { + ref ty if ( + ty.name() == "ltree" || + ty.name() == "lquery" || + ty.name() == "ltxtquery" + ) => types::ltree_to_sql(*self, w), + _ => types::text_to_sql(*self, w) + } Ok(IsNull::No) } fn accepts(ty: &Type) -> bool { match *ty { Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true, - ref ty if ty.name() == "citext" => true, + ref ty if ( + ty.name() == "citext" || + ty.name() == "ltree" || + ty.name() == "lquery" || + ty.name() == "ltxtquery" + ) => true, _ => false, } } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 94371af51..82e71fb1c 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.5" +version = "0.7.6" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -50,8 +50,8 @@ parking_lot = "0.12" percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.10" -postgres-protocol = { version = "0.6.1", path = "../postgres-protocol" } -postgres-types = { version = "0.2.2", path = "../postgres-types" } +postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } +postgres-types = { version = "0.2.3", path = "../postgres-types" } socket2 = "0.4" tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 604e2de32..0ec329a4f 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -648,3 +648,78 @@ async fn inet() { ) .await; } + +#[tokio::test] +async fn ltree() { + let client = connect("user=postgres").await; + client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + + test_type("ltree", &[ + (Some("b.c.d".to_owned()), "'b.c.d'"), + (None, "NULL"), + ]).await; +} + +#[tokio::test] +async fn ltree_any() { + let client = connect("user=postgres").await; + client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + + test_type("ltree[]", &[ + (Some(vec![]), "ARRAY[]"), + (Some(vec!["a.b.c".to_string()]), "ARRAY['a.b.c']"), + (Some(vec!["a.b.c".to_string(), "e.f.g".to_string()]), "ARRAY['a.b.c','e.f.g']"), + (None, "NULL"), + ]).await; +} + +#[tokio::test] +async fn lquery() { + let client = connect("user=postgres").await; + client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + + test_type("lquery", &[ + (Some("b.c.d".to_owned()), "'b.c.d'"), + (Some("b.c.*".to_owned()), "'b.c.*'"), + (Some("b.*{1,2}.d|e".to_owned()), "'b.*{1,2}.d|e'"), + (None, "NULL"), + ]).await; +} + +#[tokio::test] +async fn lquery_any() { + let client = connect("user=postgres").await; + client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + + test_type("lquery[]", &[ + (Some(vec![]), "ARRAY[]"), + (Some(vec!["b.c.*".to_string()]), 
"ARRAY['b.c.*']"), + (Some(vec!["b.c.*".to_string(), "b.*{1,2}.d|e".to_string()]), "ARRAY['b.c.*','b.*{1,2}.d|e']"), + (None, "NULL"), + ]).await; +} + +#[tokio::test] +async fn ltxtquery() { + let client = connect("user=postgres").await; + client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + + test_type("ltxtquery", &[ + (Some("b & c & d".to_owned()), "'b & c & d'"), + (Some("b@* & !c".to_owned()), "'b@* & !c'"), + (None, "NULL"), + ]).await; +} + +#[tokio::test] +async fn ltxtquery_any() { + let client = connect("user=postgres").await; + client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + + test_type("ltxtquery[]", &[ + (Some(vec![]), "ARRAY[]"), + (Some(vec!["b & c & d".to_string()]), "ARRAY['b & c & d']"), + (Some(vec!["b & c & d".to_string(), "b@* & !c".to_string()]), "ARRAY['b & c & d','b@* & !c']"), + (None, "NULL"), + ]).await; +} From 6ae60d6d09cb32eb8eca645488e5d86d4f2a33bb Mon Sep 17 00:00:00 2001 From: Matt Oliver Date: Thu, 3 Mar 2022 08:20:29 -0600 Subject: [PATCH 560/819] Add types to type docs --- postgres-types/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index bf7a1caee..9580fb5c1 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -407,6 +407,7 @@ impl WrongType { /// | `f32` | REAL | /// | `f64` | DOUBLE PRECISION | /// | `&str`/`String` | VARCHAR, CHAR(n), TEXT, CITEXT, NAME, UNKNOWN | +/// | | LTREE, LQUERY, LTXTQUERY | /// | `&[u8]`/`Vec` | BYTEA | /// | `HashMap>` | HSTORE | /// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | @@ -739,6 +740,7 @@ pub enum IsNull { /// | `f32` | REAL | /// | `f64` | DOUBLE PRECISION | /// | `&str`/`String` | VARCHAR, CHAR(n), TEXT, CITEXT, NAME | +/// | | LTREE, LQUERY, LTXTQUERY | /// | `&[u8]`/`Vec` | BYTEA | /// | `HashMap>` | HSTORE | /// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | From d9d283e131e3577bd82b292bbc8fa045c70e98fa Mon Sep 17 00:00:00 2001 From: Matt Oliver Date: Sat, 5 Mar 2022 14:55:07 -0600 Subject: [PATCH 561/819] Split out ltree,query,txtquery protocol parsers, add tests, rust fmt --- postgres-protocol/src/types/mod.rs | 51 +++++++++- postgres-protocol/src/types/test.rs | 116 +++++++++++++++++++++- postgres-types/src/lib.rs | 50 +++++----- tokio-postgres/tests/test/types/mod.rs | 128 +++++++++++++++++-------- 4 files changed, 274 insertions(+), 71 deletions(-) diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 5939d9f00..05f515f76 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -1060,18 +1060,59 @@ impl Inet { } } -/// Serializes a Postgres l{tree,query,txtquery} string +/// Serializes a Postgres ltree string #[inline] pub fn ltree_to_sql(v: &str, buf: &mut BytesMut) { - // A version number is prepended to an Ltree string per spec + // A version number is prepended to an ltree string per spec buf.put_u8(1); // Append the rest of the query buf.put_slice(v.as_bytes()); } -/// Deserialize a Postgres l{tree,query,txtquery} string +/// Deserialize a Postgres ltree string #[inline] pub fn ltree_from_sql(buf: &[u8]) -> Result<&str, StdBox> { - // Remove the version number from the front of the string per spec - Ok(str::from_utf8(&buf[1..])?) + match buf { + // Remove the version number from the front of the ltree per spec + [1u8, rest @ ..] 
=> Ok(str::from_utf8(rest)?), + _ => Err("ltree version 1 only supported".into()), + } +} + +/// Serializes a Postgres lquery string +#[inline] +pub fn lquery_to_sql(v: &str, buf: &mut BytesMut) { + // A version number is prepended to an lquery string per spec + buf.put_u8(1); + // Append the rest of the query + buf.put_slice(v.as_bytes()); +} + +/// Deserialize a Postgres lquery string +#[inline] +pub fn lquery_from_sql(buf: &[u8]) -> Result<&str, StdBox> { + match buf { + // Remove the version number from the front of the lquery per spec + [1u8, rest @ ..] => Ok(str::from_utf8(rest)?), + _ => Err("lquery version 1 only supported".into()), + } +} + +/// Serializes a Postgres ltxtquery string +#[inline] +pub fn ltxtquery_to_sql(v: &str, buf: &mut BytesMut) { + // A version number is prepended to an ltxtquery string per spec + buf.put_u8(1); + // Append the rest of the query + buf.put_slice(v.as_bytes()); +} + +/// Deserialize a Postgres ltxtquery string +#[inline] +pub fn ltxtquery_from_sql(buf: &[u8]) -> Result<&str, StdBox> { + match buf { + // Remove the version number from the front of the ltxtquery per spec + [1u8, rest @ ..] => Ok(str::from_utf8(rest)?), + _ => Err("ltxtquery version 1 only supported".into()), + } } diff --git a/postgres-protocol/src/types/test.rs b/postgres-protocol/src/types/test.rs index 7c20cf3ed..1ce49b66f 100644 --- a/postgres-protocol/src/types/test.rs +++ b/postgres-protocol/src/types/test.rs @@ -1,4 +1,4 @@ -use bytes::BytesMut; +use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; use std::collections::HashMap; @@ -156,3 +156,117 @@ fn non_null_array() { assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); assert_eq!(array.values().collect::>().unwrap(), values); } + +#[test] +fn ltree_sql() { + let mut query = vec![1u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + let mut buf = BytesMut::new(); + + ltree_to_sql("A.B.C", &mut buf); + + assert_eq!(query.as_slice(), buf.chunk()); +} + +#[test] +fn ltree_str() { + let mut query = vec![1u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + let success = match ltree_from_sql(query.as_slice()) { + Ok(_) => true, + _ => false, + }; + + assert!(success) +} + +#[test] +fn ltree_wrong_version() { + let mut query = vec![2u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + let success = match ltree_from_sql(query.as_slice()) { + Err(_) => true, + _ => false, + }; + + assert!(success) +} + +#[test] +fn lquery_sql() { + let mut query = vec![1u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + let mut buf = BytesMut::new(); + + lquery_to_sql("A.B.C", &mut buf); + + assert_eq!(query.as_slice(), buf.chunk()); +} + +#[test] +fn lquery_str() { + let mut query = vec![1u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + let success = match lquery_from_sql(query.as_slice()) { + Ok(_) => true, + _ => false, + }; + + assert!(success) +} + +#[test] +fn lquery_wrong_version() { + let mut query = vec![2u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + let success = match lquery_from_sql(query.as_slice()) { + Err(_) => true, + _ => false, + }; + + assert!(success) +} + +#[test] +fn ltxtquery_sql() { + let mut query = vec![1u8]; + query.extend_from_slice("a & b*".as_bytes()); + + let mut buf = BytesMut::new(); + + ltree_to_sql("a & b*", &mut buf); + + assert_eq!(query.as_slice(), buf.chunk()); +} + +#[test] +fn ltxtquery_str() { + let mut query = vec![1u8]; + query.extend_from_slice("a & b*".as_bytes()); + + let success = match ltree_from_sql(query.as_slice()) { + Ok(_) => true, + 
_ => false, + }; + + assert!(success) +} + +#[test] +fn ltxtquery_wrong_version() { + let mut query = vec![2u8]; + query.extend_from_slice("a & b*".as_bytes()); + + let success = match ltree_from_sql(query.as_slice()) { + Err(_) => true, + _ => false, + }; + + assert!(success) +} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 9580fb5c1..d029d3948 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -619,24 +619,24 @@ impl<'a> FromSql<'a> for Box { impl<'a> FromSql<'a> for &'a str { fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { match *ty { - ref ty if ( - ty.name() == "ltree" || - ty.name() == "lquery" || - ty.name() == "ltxtquery" - ) => types::ltree_from_sql(raw), - _ => types::text_from_sql(raw) + ref ty if ty.name() == "ltree" => types::ltree_from_sql(raw), + ref ty if ty.name() == "lquery" => types::lquery_from_sql(raw), + ref ty if ty.name() == "ltxtquery" => types::ltxtquery_from_sql(raw), + _ => types::text_from_sql(raw), } } fn accepts(ty: &Type) -> bool { match *ty { Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true, - ref ty if ( - ty.name() == "citext" || - ty.name() == "ltree" || - ty.name() == "lquery" || - ty.name() == "ltxtquery" - ) => true, + ref ty + if (ty.name() == "citext" + || ty.name() == "ltree" + || ty.name() == "lquery" + || ty.name() == "ltxtquery") => + { + true + } _ => false, } } @@ -939,13 +939,11 @@ impl ToSql for Vec { impl<'a> ToSql for &'a str { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { - match ty { - ref ty if ( - ty.name() == "ltree" || - ty.name() == "lquery" || - ty.name() == "ltxtquery" - ) => types::ltree_to_sql(*self, w), - _ => types::text_to_sql(*self, w) + match *ty { + ref ty if ty.name() == "ltree" => types::ltree_to_sql(*self, w), + ref ty if ty.name() == "lquery" => types::lquery_to_sql(*self, w), + ref ty if ty.name() == "ltxtquery" => types::ltxtquery_to_sql(*self, w), + _ => types::text_to_sql(*self, w), } Ok(IsNull::No) } @@ -953,12 +951,14 @@ impl<'a> ToSql for &'a str { fn accepts(ty: &Type) -> bool { match *ty { Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true, - ref ty if ( - ty.name() == "citext" || - ty.name() == "ltree" || - ty.name() == "lquery" || - ty.name() == "ltxtquery" - ) => true, + ref ty + if (ty.name() == "citext" + || ty.name() == "ltree" + || ty.name() == "lquery" + || ty.name() == "ltxtquery") => + { + true + } _ => false, } } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 0ec329a4f..f69932e55 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -652,74 +652,122 @@ async fn inet() { #[tokio::test] async fn ltree() { let client = connect("user=postgres").await; - client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + client + .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) + .await + .unwrap(); - test_type("ltree", &[ - (Some("b.c.d".to_owned()), "'b.c.d'"), - (None, "NULL"), - ]).await; + test_type( + "ltree", + &[(Some("b.c.d".to_owned()), "'b.c.d'"), (None, "NULL")], + ) + .await; } #[tokio::test] async fn ltree_any() { let client = connect("user=postgres").await; - client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + client + .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) + .await + .unwrap(); - test_type("ltree[]", &[ - (Some(vec![]), "ARRAY[]"), - (Some(vec!["a.b.c".to_string()]), "ARRAY['a.b.c']"), - 
(Some(vec!["a.b.c".to_string(), "e.f.g".to_string()]), "ARRAY['a.b.c','e.f.g']"), - (None, "NULL"), - ]).await; + test_type( + "ltree[]", + &[ + (Some(vec![]), "ARRAY[]"), + (Some(vec!["a.b.c".to_string()]), "ARRAY['a.b.c']"), + ( + Some(vec!["a.b.c".to_string(), "e.f.g".to_string()]), + "ARRAY['a.b.c','e.f.g']", + ), + (None, "NULL"), + ], + ) + .await; } #[tokio::test] async fn lquery() { let client = connect("user=postgres").await; - client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + client + .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) + .await + .unwrap(); - test_type("lquery", &[ - (Some("b.c.d".to_owned()), "'b.c.d'"), - (Some("b.c.*".to_owned()), "'b.c.*'"), - (Some("b.*{1,2}.d|e".to_owned()), "'b.*{1,2}.d|e'"), - (None, "NULL"), - ]).await; + test_type( + "lquery", + &[ + (Some("b.c.d".to_owned()), "'b.c.d'"), + (Some("b.c.*".to_owned()), "'b.c.*'"), + (Some("b.*{1,2}.d|e".to_owned()), "'b.*{1,2}.d|e'"), + (None, "NULL"), + ], + ) + .await; } #[tokio::test] async fn lquery_any() { let client = connect("user=postgres").await; - client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + client + .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) + .await + .unwrap(); - test_type("lquery[]", &[ - (Some(vec![]), "ARRAY[]"), - (Some(vec!["b.c.*".to_string()]), "ARRAY['b.c.*']"), - (Some(vec!["b.c.*".to_string(), "b.*{1,2}.d|e".to_string()]), "ARRAY['b.c.*','b.*{1,2}.d|e']"), - (None, "NULL"), - ]).await; + test_type( + "lquery[]", + &[ + (Some(vec![]), "ARRAY[]"), + (Some(vec!["b.c.*".to_string()]), "ARRAY['b.c.*']"), + ( + Some(vec!["b.c.*".to_string(), "b.*{1,2}.d|e".to_string()]), + "ARRAY['b.c.*','b.*{1,2}.d|e']", + ), + (None, "NULL"), + ], + ) + .await; } #[tokio::test] async fn ltxtquery() { let client = connect("user=postgres").await; - client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); + client + .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) + .await + .unwrap(); - test_type("ltxtquery", &[ - (Some("b & c & d".to_owned()), "'b & c & d'"), - (Some("b@* & !c".to_owned()), "'b@* & !c'"), - (None, "NULL"), - ]).await; + test_type( + "ltxtquery", + &[ + (Some("b & c & d".to_owned()), "'b & c & d'"), + (Some("b@* & !c".to_owned()), "'b@* & !c'"), + (None, "NULL"), + ], + ) + .await; } #[tokio::test] async fn ltxtquery_any() { let client = connect("user=postgres").await; - client.execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]).await.unwrap(); - - test_type("ltxtquery[]", &[ - (Some(vec![]), "ARRAY[]"), - (Some(vec!["b & c & d".to_string()]), "ARRAY['b & c & d']"), - (Some(vec!["b & c & d".to_string(), "b@* & !c".to_string()]), "ARRAY['b & c & d','b@* & !c']"), - (None, "NULL"), - ]).await; + client + .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) + .await + .unwrap(); + + test_type( + "ltxtquery[]", + &[ + (Some(vec![]), "ARRAY[]"), + (Some(vec!["b & c & d".to_string()]), "ARRAY['b & c & d']"), + ( + Some(vec!["b & c & d".to_string(), "b@* & !c".to_string()]), + "ARRAY['b & c & d','b@* & !c']", + ), + (None, "NULL"), + ], + ) + .await; } From 6fae6552ecc5e6755360bd33e9ede3e51b7eb566 Mon Sep 17 00:00:00 2001 From: Matt Oliver Date: Wed, 16 Mar 2022 21:20:34 -0500 Subject: [PATCH 562/819] Fix tests, replace match with matches! 
--- docker/sql_setup.sh | 1 + postgres-protocol/src/types/test.rs | 42 ++++---------------------- tokio-postgres/tests/test/types/mod.rs | 36 ---------------------- 3 files changed, 7 insertions(+), 72 deletions(-) diff --git a/docker/sql_setup.sh b/docker/sql_setup.sh index 422dcbda9..0315ac805 100755 --- a/docker/sql_setup.sh +++ b/docker/sql_setup.sh @@ -96,4 +96,5 @@ psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL CREATE ROLE ssl_user LOGIN; CREATE EXTENSION hstore; CREATE EXTENSION citext; + CREATE EXTENSION ltree; EOSQL diff --git a/postgres-protocol/src/types/test.rs b/postgres-protocol/src/types/test.rs index 1ce49b66f..6f1851fc2 100644 --- a/postgres-protocol/src/types/test.rs +++ b/postgres-protocol/src/types/test.rs @@ -174,12 +174,7 @@ fn ltree_str() { let mut query = vec![1u8]; query.extend_from_slice("A.B.C".as_bytes()); - let success = match ltree_from_sql(query.as_slice()) { - Ok(_) => true, - _ => false, - }; - - assert!(success) + assert!(matches!(ltree_from_sql(query.as_slice()), Ok(_))) } #[test] @@ -187,12 +182,7 @@ fn ltree_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("A.B.C".as_bytes()); - let success = match ltree_from_sql(query.as_slice()) { - Err(_) => true, - _ => false, - }; - - assert!(success) + assert!(matches!(ltree_from_sql(query.as_slice()), Err(_))) } #[test] @@ -212,12 +202,7 @@ fn lquery_str() { let mut query = vec![1u8]; query.extend_from_slice("A.B.C".as_bytes()); - let success = match lquery_from_sql(query.as_slice()) { - Ok(_) => true, - _ => false, - }; - - assert!(success) + assert!(matches!(lquery_from_sql(query.as_slice()), Ok(_))) } #[test] @@ -225,12 +210,7 @@ fn lquery_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("A.B.C".as_bytes()); - let success = match lquery_from_sql(query.as_slice()) { - Err(_) => true, - _ => false, - }; - - assert!(success) + assert!(matches!(lquery_from_sql(query.as_slice()), Err(_))) } #[test] @@ -250,12 +230,7 @@ fn ltxtquery_str() { let mut query = vec![1u8]; query.extend_from_slice("a & b*".as_bytes()); - let success = match ltree_from_sql(query.as_slice()) { - Ok(_) => true, - _ => false, - }; - - assert!(success) + assert!(matches!(ltree_from_sql(query.as_slice()), Ok(_))) } #[test] @@ -263,10 +238,5 @@ fn ltxtquery_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("a & b*".as_bytes()); - let success = match ltree_from_sql(query.as_slice()) { - Err(_) => true, - _ => false, - }; - - assert!(success) + assert!(matches!(ltree_from_sql(query.as_slice()), Err(_))) } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index f69932e55..de700d791 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -651,12 +651,6 @@ async fn inet() { #[tokio::test] async fn ltree() { - let client = connect("user=postgres").await; - client - .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) - .await - .unwrap(); - test_type( "ltree", &[(Some("b.c.d".to_owned()), "'b.c.d'"), (None, "NULL")], @@ -666,12 +660,6 @@ async fn ltree() { #[tokio::test] async fn ltree_any() { - let client = connect("user=postgres").await; - client - .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) - .await - .unwrap(); - test_type( "ltree[]", &[ @@ -689,12 +677,6 @@ async fn ltree_any() { #[tokio::test] async fn lquery() { - let client = connect("user=postgres").await; - client - .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) - .await - .unwrap(); - test_type( "lquery", &[ @@ 
-709,12 +691,6 @@ async fn lquery() { #[tokio::test] async fn lquery_any() { - let client = connect("user=postgres").await; - client - .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) - .await - .unwrap(); - test_type( "lquery[]", &[ @@ -732,12 +708,6 @@ async fn lquery_any() { #[tokio::test] async fn ltxtquery() { - let client = connect("user=postgres").await; - client - .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) - .await - .unwrap(); - test_type( "ltxtquery", &[ @@ -751,12 +721,6 @@ async fn ltxtquery() { #[tokio::test] async fn ltxtquery_any() { - let client = connect("user=postgres").await; - client - .execute("CREATE EXTENSION IF NOT EXISTS ltree;", &[]) - .await - .unwrap(); - test_type( "ltxtquery[]", &[ From 812dfa710ab131aaa5d483bf7df660b14cbacd8e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 16 Apr 2022 09:10:09 -0400 Subject: [PATCH 563/819] Update ci.yml --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 520d665f8..88f4170a5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.53.0 + version: 1.56.0 - run: echo "::set-output name=version::$(rustc --version)" id: rust-version - uses: actions/cache@v1 From cf381ce6a2e44ca5ff13b1b2c34541d1e9af7199 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Fri, 15 Apr 2022 23:57:30 +0300 Subject: [PATCH 564/819] Hide `tokio_postgres::client::SocketConfig` behind "runtime" feature This fixes warnings reported by rust 1.60 when doing the following: ```shell $ cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features ``` --- tokio-postgres/src/client.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index dea77da94..42d507fda 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,5 +1,7 @@ use crate::codec::{BackendMessages, FrontendMessage}; -use crate::config::{Host, SslMode}; +#[cfg(feature = "runtime")] +use crate::config::Host; +use crate::config::SslMode; use crate::connection::{Request, RequestMessages}; use crate::copy_out::CopyOutStream; use crate::query::RowStream; @@ -25,6 +27,7 @@ use std::collections::HashMap; use std::fmt; use std::sync::Arc; use std::task::{Context, Poll}; +#[cfg(feature = "runtime")] use std::time::Duration; use tokio::io::{AsyncRead, AsyncWrite}; @@ -145,6 +148,7 @@ impl InnerClient { } } +#[cfg(feature = "runtime")] #[derive(Clone)] pub(crate) struct SocketConfig { pub host: Host, From 1d9c93d2aeff5dbb93f9d6e0d59c5f817a6162b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Miku=C5=82a?= Date: Wed, 20 Apr 2022 16:52:47 +0200 Subject: [PATCH 565/819] Add conversions from Uuid 1.0 --- postgres-types/Cargo.toml | 2 ++ postgres-types/src/lib.rs | 2 ++ postgres-types/src/uuid_1.rs | 25 +++++++++++++++++++++++ postgres/Cargo.toml | 1 + postgres/src/lib.rs | 1 + tokio-postgres/Cargo.toml | 2 ++ tokio-postgres/src/lib.rs | 1 + tokio-postgres/tests/test/types/mod.rs | 2 ++ tokio-postgres/tests/test/types/uuid_1.rs | 18 ++++++++++++++++ 9 files changed, 54 insertions(+) create mode 100644 postgres-types/src/uuid_1.rs create mode 100644 tokio-postgres/tests/test/types/uuid_1.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 000d71ea0..e0ccb9493 100644 --- a/postgres-types/Cargo.toml +++ 
b/postgres-types/Cargo.toml @@ -22,6 +22,7 @@ with-geo-types-0_6 = ["geo-types-06"] with-geo-types-0_7 = ["geo-types-0_7"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-uuid-0_8 = ["uuid-08"] +with-uuid-1 = ["uuid-1"] with-time-0_2 = ["time-02"] with-time-0_3 = ["time-03"] @@ -42,5 +43,6 @@ geo-types-0_7 = { version = "0.7", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } +uuid-1 = { version = "1.0", package = "uuid", optional = true } time-02 = { version = "0.2", package = "time", optional = true } time-03 = { version = "0.3", package = "time", default-features = false, optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index d029d3948..f9c1c5ce5 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -230,6 +230,8 @@ mod time_02; mod time_03; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; +#[cfg(feature = "with-uuid-1")] +mod uuid_1; // The time::{date, time} macros produce compile errors if the crate package is renamed. #[cfg(feature = "with-time-0_2")] diff --git a/postgres-types/src/uuid_1.rs b/postgres-types/src/uuid_1.rs new file mode 100644 index 000000000..d9969f60c --- /dev/null +++ b/postgres-types/src/uuid_1.rs @@ -0,0 +1,25 @@ +use bytes::BytesMut; +use postgres_protocol::types; +use std::error::Error; +use uuid_1::Uuid; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for Uuid { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let bytes = types::uuid_from_sql(raw)?; + Ok(Uuid::from_bytes(bytes)) + } + + accepts!(UUID); +} + +impl ToSql for Uuid { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::uuid_to_sql(*self.as_bytes(), w); + Ok(IsNull::No) + } + + accepts!(UUID); + to_sql_checked!(); +} diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index b61e42aca..12f9e98ef 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -30,6 +30,7 @@ with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] +with-uuid-1 = ["tokio-postgres/with-uuid-1"] with-time-0_2 = ["tokio-postgres/with-time-0_2"] with-time-0_3 = ["tokio-postgres/with-time-0_3"] diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index a599532e4..fbe85cbde 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -61,6 +61,7 @@ //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | +//! | `with-uuid-1` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 1.0 | no | //! | `with-time-0_2` | Enable support for the 0.2 version of the `time` crate. | [time](https://crates.io/crates/time/0.2.0) 0.2 | no | //! | `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. 
| [time](https://crates.io/crates/time/0.3.0) 0.3 | no | #![warn(clippy::all, rust_2018_idioms, missing_docs)] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 82e71fb1c..acb4d04e9 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -36,6 +36,7 @@ with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] +with-uuid-1 = ["postgres-types/with-uuid-1"] with-time-0_2 = ["postgres-types/with-time-0_2"] with-time-0_3 = ["postgres-types/with-time-0_3"] @@ -70,5 +71,6 @@ geo-types-07 = { version = "0.7", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } uuid-08 = { version = "0.8", package = "uuid" } +uuid-1 = { version = "1.0", package = "uuid" } time-02 = { version = "0.2", package = "time" } time-03 = { version = "0.3", package = "time", features = ["parsing"] } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index e9516e0b3..9afb93801 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -112,6 +112,7 @@ //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | +//! | `with-uuid-1` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 1.0 | no | //! | `with-time-0_2` | Enable support for the 0.2 version of the `time` crate. | [time](https://crates.io/crates/time/0.2.0) 0.2 | no | //! | `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. 
| [time](https://crates.io/crates/time/0.3.0) 0.3 | no | #![doc(html_root_url = "https://docs.rs/tokio-postgres/0.7")] diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index de700d791..e3fd663a5 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -33,6 +33,8 @@ mod time_02; mod time_03; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; +#[cfg(feature = "with-uuid-1")] +mod uuid_1; async fn test_type(sql_type: &str, checks: &[(T, S)]) where diff --git a/tokio-postgres/tests/test/types/uuid_1.rs b/tokio-postgres/tests/test/types/uuid_1.rs new file mode 100644 index 000000000..0eb89be8f --- /dev/null +++ b/tokio-postgres/tests/test/types/uuid_1.rs @@ -0,0 +1,18 @@ +use uuid_1::Uuid; + +use crate::types::test_type; + +#[tokio::test] +async fn test_uuid_params() { + test_type( + "UUID", + &[ + ( + Some(Uuid::parse_str("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11").unwrap()), + "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", + ), + (None, "NULL"), + ], + ) + .await +} From da78d4ea065d976bf4e2d711b3718055a63c9f77 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Apr 2022 08:24:30 -0400 Subject: [PATCH 566/819] Release postgres-protocol v0.6.4 --- postgres-protocol/CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 5d9cecd01..d84f29ded 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.6.4 - 2022-04-03 + +### Added + +* Added parsing support for `ltree`, `lquery`, and `ltxtquery`. + ## v0.6.3 - 2021-12-10 ### Changed From 1d8aa0ad87873429aa2472102bca3365a73f24ce Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Apr 2022 08:28:06 -0400 Subject: [PATCH 567/819] Release postgres-derive v0.4.2 --- postgres-derive/CHANGELOG.md | 6 ++++++ postgres-derive/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md index 9bb3a752f..1cc55bfe8 100644 --- a/postgres-derive/CHANGELOG.md +++ b/postgres-derive/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.4.2 - 2022-04-30 + +### Added + +* Added support for transparent wrapper types. + ## v0.4.1 - 2021-11-23 ### Fixed diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 1ce243a58..324400162 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-derive" -version = "0.4.1" +version = "0.4.2" authors = ["Steven Fackler "] license = "MIT/Apache-2.0" edition = "2018" From 695067c299e5ada849f294859be705348aecbf21 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Apr 2022 08:40:30 -0400 Subject: [PATCH 568/819] Release postgres-types v0.2.3 --- postgres-types/CHANGELOG.md | 10 ++++++++++ postgres-types/Cargo.toml | 6 ++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index f8b2835eb..d2284dbd5 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,15 @@ # Change Log +## v0.2.3 - 2022-04-30 + +### Added + +* Added `ToSql` and `FromSql` implementations for `Box`. +* Added `BorrowToSql` implementations for `Box` and `Box`. +* Added support for `cidr` 0.2 via the `with-cidr-02` feature. +* Added conversions between the `LTREE`, `LQUERY` and `LTXTQUERY` types and Rust strings. +* Added support for `uuid` 1.0 via the `with-uuid-1` feature. 
+ ## v0.2.2 - 2021-09-29 ### Added diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index e0ccb9493..d8f147022 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -30,11 +30,13 @@ with-time-0_3 = ["time-03"] bytes = "1.0" fallible-iterator = "0.2" postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } -postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } +postgres-derive = { version = "0.4.2", optional = true, path = "../postgres-derive" } array-init = { version = "2", optional = true } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } -chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = ["clock"], optional = true } +chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = [ + "clock", +], optional = true } cidr-02 = { version = "0.2", package = "cidr", optional = true } eui48-04 = { version = "0.4", package = "eui48", optional = true } eui48-1 = { version = "1.0", package = "eui48", optional = true } From 17af0d5b6884839c4d7d4c3fb85347f9f1e3e633 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Apr 2022 08:53:14 -0400 Subject: [PATCH 569/819] Release tokio-postgres v0.7.6 --- tokio-postgres/CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index eca196f06..b294aec69 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,16 @@ # Change Log +## v0.7.6 - 2022-04-30 + +### Added + +* Added support for `uuid` 1.0 via the `with-uuid-1` feature. + +### Changed + +* Upgraded to `tokio-util` 0.7. +* Upgraded to `parking_lot` 0.12. + ## v0.7.5 - 2021-10-29 ### Fixed From 8785773e8e332f665471d79258dcc8f0051ebf27 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 30 Apr 2022 08:56:34 -0400 Subject: [PATCH 570/819] Release postgres v0.19.3 --- postgres/CHANGELOG.md | 6 ++++++ postgres/Cargo.toml | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 6af8d914b..5394da803 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.19.3 - 2022-04-30 + +### Added + +* Added support for `uuid` 1.0 via the `with-uuid-1` feature. + ## v0.19.2 - 2021-09-29 ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 12f9e98ef..be59df101 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.19.2" +version = "0.19.3" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -38,7 +38,7 @@ with-time-0_3 = ["tokio-postgres/with-time-0_3"] bytes = "1.0" fallible-iterator = "0.2" futures = "0.3" -tokio-postgres = { version = "0.7.2", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.6", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" From a410a5fe04d487ab2ee3aadec8ceca2b12473ff3 Mon Sep 17 00:00:00 2001 From: Lachezar Lechev <8925621+elpiel@users.noreply.github.com> Date: Tue, 21 Jun 2022 13:31:31 +0200 Subject: [PATCH 571/819] Add `array-impls` feature in docs table --- tokio-postgres/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 9afb93801..b486a9338 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -104,6 +104,7 @@ //! 
| Feature | Description | Extra dependencies | Default | //! | ------- | ----------- | ------------------ | ------- | //! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 1.0 with the features `net` and `time` | yes | +//! | `array-impls` | Enables `ToSql` and `FromSql` trait impls for arrays | - | no | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | //! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | From dd3719ddba0fda7a8e067fdf66dda78796a35de8 Mon Sep 17 00:00:00 2001 From: Lachezar Lechev Date: Wed, 22 Jun 2022 18:38:55 +0200 Subject: [PATCH 572/819] workflow - ci - bump Rust version to 1.57 Fixes #909 --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88f4170a5..24c479ef9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.56.0 + version: 1.57.0 - run: echo "::set-output name=version::$(rustc --version)" id: rust-version - uses: actions/cache@v1 From 1bd0d9271ebaee06ec252197fa1c58feeba2229f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ABl=20Obr=C3=A9jan?= Date: Thu, 23 Jun 2022 15:38:59 +0200 Subject: [PATCH 573/819] Implement `ToSql` & `FromSql` for `Box<[T]>` --- postgres-types/src/lib.rs | 41 +++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index f9c1c5ce5..6f72a733c 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -452,10 +452,11 @@ impl WrongType { /// /// # Arrays /// -/// `FromSql` is implemented for `Vec` and `[T; N]` where `T` implements -/// `FromSql`, and corresponds to one-dimensional Postgres arrays. **Note:** -/// the impl for arrays only exist when the Cargo feature `array-impls` is -/// enabled. +/// `FromSql` is implemented for `Vec`, `Box<[T]>` and `[T; N]` where `T` +/// implements `FromSql`, and corresponds to one-dimensional Postgres arrays. +/// +/// **Note:** the impl for arrays only exist when the Cargo feature `array-impls` +/// is enabled. pub trait FromSql<'a>: Sized { /// Creates a new value of this type from a buffer of data of the specified /// Postgres `Type` in its binary format. @@ -580,6 +581,16 @@ impl<'a, T: FromSql<'a>, const N: usize> FromSql<'a> for [T; N] { } } +impl<'a, T: FromSql<'a>> FromSql<'a> for Box<[T]> { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + Vec::::from_sql(ty, raw).map(Vec::into_boxed_slice) + } + + fn accepts(ty: &Type) -> bool { + Vec::::accepts(ty) + } +} + impl<'a> FromSql<'a> for Vec { fn from_sql(_: &Type, raw: &'a [u8]) -> Result, Box> { Ok(types::bytea_from_sql(raw).to_owned()) @@ -783,10 +794,12 @@ pub enum IsNull { /// /// # Arrays /// -/// `ToSql` is implemented for `Vec`, `&[T]` and `[T; N]` where `T` -/// implements `ToSql`, and corresponds to one-dimensional Postgres arrays with -/// an index offset of 1. **Note:** the impl for arrays only exist when the -/// Cargo feature `array-impls` is enabled. 
+/// `ToSql` is implemented for `Vec`, `&[T]`, `Box<[T]>` and `[T; N]` where +/// `T` implements `ToSql`, and corresponds to one-dimensional Postgres arrays +/// with an index offset of 1. +/// +/// **Note:** the impl for arrays only exist when the Cargo feature `array-impls` +/// is enabled. pub trait ToSql: fmt::Debug { /// Converts the value of `self` into the binary format of the specified /// Postgres `Type`, appending it to `out`. @@ -927,6 +940,18 @@ impl ToSql for Vec { to_sql_checked!(); } +impl ToSql for Box<[T]> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&[T] as ToSql>::to_sql(&&**self, ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&[T] as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + impl ToSql for Vec { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[u8] as ToSql>::to_sql(&&**self, ty, w) From 9daf8b1e5d5e230bc1c482b706ba8042b3c64420 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sylwester=20R=C4=85pa=C5=82a?= Date: Wed, 29 Jun 2022 22:11:12 +0200 Subject: [PATCH 574/819] feat: support [u8; N] as BYTEA --- postgres-types/src/lib.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 6f72a733c..eca2a78ca 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -915,6 +915,18 @@ impl<'a> ToSql for &'a [u8] { to_sql_checked!(); } +#[cfg(feature = "array-impls")] +impl ToSql for [u8; N] { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::bytea_to_sql(&self[..], w); + Ok(IsNull::No) + } + + accepts!(BYTEA); + + to_sql_checked!(); +} + #[cfg(feature = "array-impls")] impl ToSql for [T; N] { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { From 6b8cb8a2be49db87322d6a75118137c0c9f51432 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sylwester=20R=C4=85pa=C5=82a?= Date: Thu, 30 Jun 2022 00:13:19 +0200 Subject: [PATCH 575/819] chore: update ToSql documentation about [u8; N] --- postgres-types/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index eca2a78ca..833d8a107 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -754,7 +754,7 @@ pub enum IsNull { /// | `f64` | DOUBLE PRECISION | /// | `&str`/`String` | VARCHAR, CHAR(n), TEXT, CITEXT, NAME | /// | | LTREE, LQUERY, LTXTQUERY | -/// | `&[u8]`/`Vec` | BYTEA | +/// | `&[u8]`/`Vec`/`[u8; N]` | BYTEA | /// | `HashMap>` | HSTORE | /// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | /// | `IpAddr` | INET | @@ -794,9 +794,9 @@ pub enum IsNull { /// /// # Arrays /// -/// `ToSql` is implemented for `Vec`, `&[T]`, `Box<[T]>` and `[T; N]` where -/// `T` implements `ToSql`, and corresponds to one-dimensional Postgres arrays -/// with an index offset of 1. +/// `ToSql` is implemented for `[u8; N]`, `Vec`, `&[T]`, `Box<[T]>` and `[T; N]` +/// where `T` implements `ToSql` and `N` is const usize, and corresponds to one-dimensional +/// Postgres arrays with an index offset of 1. /// /// **Note:** the impl for arrays only exist when the Cargo feature `array-impls` /// is enabled. From 7699c78037e917df070c2115dd844f2f0da805bc Mon Sep 17 00:00:00 2001 From: BratSinot Date: Mon, 4 Jul 2022 12:25:58 +0200 Subject: [PATCH 576/819] Add FromSql / ToSql for smol_str::SmolStr. 
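
For illustration only (this sketch is not part of the patch): with the new optional `smol_str` support enabled in `postgres-types`, a `SmolStr` can be bound as a query parameter and read back from text-like columns just like `String`. The `client` handle and the `users(name TEXT)` table below are hypothetical, and the snippet assumes an async context that can propagate errors with `?`.

```rust
// Hypothetical usage sketch; assumes a connected tokio-postgres `client`
// and a table `users(name TEXT)`. SmolStr accepts the same text types as &str.
use smol_str::SmolStr;

let name = SmolStr::new("ferris");
client
    .execute("INSERT INTO users (name) VALUES ($1)", &[&name])
    .await?;
let row = client
    .query_one("SELECT name FROM users WHERE name = $1", &[&name])
    .await?;
let fetched: SmolStr = row.get(0);
assert_eq!(fetched, name);
```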
--- postgres-types/Cargo.toml | 2 ++ postgres-types/src/lib.rs | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index d8f147022..83ff718a4 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -48,3 +48,5 @@ uuid-08 = { version = "0.8", package = "uuid", optional = true } uuid-1 = { version = "1.0", package = "uuid", optional = true } time-02 = { version = "0.2", package = "time", optional = true } time-03 = { version = "0.3", package = "time", default-features = false, optional = true } + +smol_str = { version = "0.1.23", default-features = false, optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 6f72a733c..3933f34c8 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -155,6 +155,7 @@ pub use pg_lsn::PgLsn; pub use crate::special::{Date, Timestamp}; use bytes::BytesMut; +use smol_str::SmolStr; // Number of seconds from 1970-01-01 to 2000-01-01 const TIME_SEC_CONVERSION: u64 = 946_684_800; @@ -629,6 +630,18 @@ impl<'a> FromSql<'a> for Box { } } +#[cfg(feature = "smol_str")] +impl<'a> FromSql<'a> for smol_str::SmolStr { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + <&str as FromSql>::from_sql(ty, raw) + .map(SmolStr::from) + } + + fn accepts(ty: &Type) -> bool { + <&str as FromSql>::accepts(ty) + } +} + impl<'a> FromSql<'a> for &'a str { fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { match *ty { @@ -1029,6 +1042,19 @@ impl ToSql for Box { to_sql_checked!(); } +#[cfg(feature = "smol_str")] +impl ToSql for smol_str::SmolStr { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&str as ToSql>::to_sql(&&**self, ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&str as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + macro_rules! simple_to { ($t:ty, $f:ident, $($expected:ident),+) => { impl ToSql for $t { From cff971d48cbe49ae96bec8e7df7ebe306e41f270 Mon Sep 17 00:00:00 2001 From: BratSinot Date: Tue, 5 Jul 2022 09:13:19 +0200 Subject: [PATCH 577/819] Fix suggestions. 
--- postgres-types/Cargo.toml | 2 +- postgres-types/src/lib.rs | 31 +++++-------------------------- postgres-types/src/smol_str_01.rs | 27 +++++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 27 deletions(-) create mode 100644 postgres-types/src/smol_str_01.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 83ff718a4..d84641866 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -49,4 +49,4 @@ uuid-1 = { version = "1.0", package = "uuid", optional = true } time-02 = { version = "0.2", package = "time", optional = true } time-03 = { version = "0.3", package = "time", default-features = false, optional = true } -smol_str = { version = "0.1.23", default-features = false, optional = true } +smol_str-01 = { version = "0.1.23", package = "smol_str", default-features = false, optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 3933f34c8..c06a2fb44 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -155,7 +155,6 @@ pub use pg_lsn::PgLsn; pub use crate::special::{Date, Timestamp}; use bytes::BytesMut; -use smol_str::SmolStr; // Number of seconds from 1970-01-01 to 2000-01-01 const TIME_SEC_CONVERSION: u64 = 946_684_800; @@ -233,6 +232,8 @@ mod time_03; mod uuid_08; #[cfg(feature = "with-uuid-1")] mod uuid_1; +#[cfg(feature = "smol_str-01")] +mod smol_str_01; // The time::{date, time} macros produce compile errors if the crate package is renamed. #[cfg(feature = "with-time-0_2")] @@ -444,6 +445,9 @@ impl WrongType { /// | `eui48::MacAddress` | MACADDR | /// | `cidr::InetCidr` | CIDR | /// | `cidr::InetAddr` | INET | +/// | `smol_str::SmolStr` | VARCHAR, CHAR(n), TEXT, CITEXT, | +/// | | NAME, UNKNOWN, LTREE, LQUERY, | +/// | | LTXTQUERY | /// /// # Nullability /// @@ -630,18 +634,6 @@ impl<'a> FromSql<'a> for Box { } } -#[cfg(feature = "smol_str")] -impl<'a> FromSql<'a> for smol_str::SmolStr { - fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { - <&str as FromSql>::from_sql(ty, raw) - .map(SmolStr::from) - } - - fn accepts(ty: &Type) -> bool { - <&str as FromSql>::accepts(ty) - } -} - impl<'a> FromSql<'a> for &'a str { fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { match *ty { @@ -1042,19 +1034,6 @@ impl ToSql for Box { to_sql_checked!(); } -#[cfg(feature = "smol_str")] -impl ToSql for smol_str::SmolStr { - fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { - <&str as ToSql>::to_sql(&&**self, ty, w) - } - - fn accepts(ty: &Type) -> bool { - <&str as ToSql>::accepts(ty) - } - - to_sql_checked!(); -} - macro_rules! 
simple_to { ($t:ty, $f:ident, $($expected:ident),+) => { impl ToSql for $t { diff --git a/postgres-types/src/smol_str_01.rs b/postgres-types/src/smol_str_01.rs new file mode 100644 index 000000000..a0d024ce2 --- /dev/null +++ b/postgres-types/src/smol_str_01.rs @@ -0,0 +1,27 @@ +use bytes::BytesMut; +use smol_str_01::SmolStr; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for SmolStr { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + <&str as FromSql>::from_sql(ty, raw).map(SmolStr::from) + } + + fn accepts(ty: &Type) -> bool { + <&str as FromSql>::accepts(ty) + } +} + +impl ToSql for SmolStr { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&str as ToSql>::to_sql(&&**self, ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&str as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} From db4c65e884253c700d7de363b629500f3ebc80eb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 5 Jul 2022 07:26:32 -0400 Subject: [PATCH 578/819] clippy --- postgres-types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 833d8a107..c14350308 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -700,7 +700,7 @@ impl<'a> FromSql<'a> for SystemTime { let epoch = UNIX_EPOCH + Duration::from_secs(TIME_SEC_CONVERSION); let negative = time < 0; - let time = time.abs() as u64; + let time = time.unsigned_abs(); let secs = time / USEC_PER_SEC; let nsec = (time % USEC_PER_SEC) * NSEC_PER_USEC; From 68b1be6715bd6b8f1eef40c3b10653ce756a0a42 Mon Sep 17 00:00:00 2001 From: BratSinot Date: Tue, 5 Jul 2022 14:58:26 +0200 Subject: [PATCH 579/819] rustfmt fix --- postgres-types/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index c06a2fb44..8c32626a2 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -224,6 +224,8 @@ mod geo_types_06; mod geo_types_07; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; +#[cfg(feature = "smol_str-01")] +mod smol_str_01; #[cfg(feature = "with-time-0_2")] mod time_02; #[cfg(feature = "with-time-0_3")] @@ -232,8 +234,6 @@ mod time_03; mod uuid_08; #[cfg(feature = "with-uuid-1")] mod uuid_1; -#[cfg(feature = "smol_str-01")] -mod smol_str_01; // The time::{date, time} macros produce compile errors if the crate package is renamed. 
#[cfg(feature = "with-time-0_2")] From e6dd655a2ada507d8095e8422fdeef20d053b98e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 18 Jul 2022 20:04:00 -0400 Subject: [PATCH 580/819] Upgrade phf --- codegen/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index 14bebccf2..bbe6b789c 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -phf_codegen = "0.10" +phf_codegen = "0.11" regex = "1.0" marksman_escape = "0.1" linked-hash-map = "0.5" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index acb4d04e9..b3b7cf825 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -50,7 +50,7 @@ log = "0.4" parking_lot = "0.12" percent-encoding = "2.0" pin-project-lite = "0.2" -phf = "0.10" +phf = "0.11" postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } postgres-types = { version = "0.2.3", path = "../postgres-types" } socket2 = "0.4" From 8736f45fbd73ab48ab49a5401213da0007a956d6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 18 Jul 2022 20:13:48 -0400 Subject: [PATCH 581/819] bump ci version --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 24c479ef9..8d17f4d6b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.57.0 + version: 1.62.0 - run: echo "::set-output name=version::$(rustc --version)" id: rust-version - uses: actions/cache@v1 From d7b10f2cea5edd1e625dd4b1f2e57e859008f61f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 18 Jul 2022 20:18:18 -0400 Subject: [PATCH 582/819] cleanup --- postgres-types/src/lib.rs | 15 +++------------ tokio-postgres/src/query.rs | 5 ++++- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index f4f88085e..48f6b5d43 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -836,7 +836,9 @@ pub trait ToSql: fmt::Debug { ) -> Result>; /// Specify the encode format - fn encode_format(&self) -> Format { Format::Binary } + fn encode_format(&self) -> Format { + Format::Binary + } } /// Supported Postgres message format types @@ -849,17 +851,6 @@ pub enum Format { Binary, } -/// Convert from `Format` to the Postgres integer representation of those formats -impl From for i16 { - fn from(format: Format) -> Self { - match format { - Format::Text => 0, - Format::Binary => 1, - } - } -} - - impl<'a, T> ToSql for &'a T where T: ToSql, diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index af57e2aec..965a21a48 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -156,7 +156,10 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let (param_formats, params):(Vec<_>, Vec<_>) = params.into_iter().map(|p|->(i16, P){(p.borrow_to_sql().encode_format().into(),p)}).unzip(); + let (param_formats, params): (Vec<_>, Vec<_>) = params + .into_iter() + .map(|p| { (p.borrow_to_sql().encode_format() as i16, p) }) + .unzip(); let params = params.into_iter(); assert!( From f048f39812e4024d612cc197743b9513f8238584 Mon Sep 17 00:00:00 2001 From: Harry Maclean Date: Wed, 3 Aug 2022 23:32:30 +1200 Subject: [PATCH 583/819] Derive Debug for Format It's useful to be able to 
inspect Format values when debugging. --- postgres-types/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 48f6b5d43..e912b378d 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -844,6 +844,7 @@ pub trait ToSql: fmt::Debug { /// Supported Postgres message format types /// /// Using Text format in a message assumes a Postgres `SERVER_ENCODING` of `UTF8` +#[derive(Debug)] pub enum Format { /// Text format (UTF-8) Text, From 0d11db69404e504ce80d41821410af20445d0141 Mon Sep 17 00:00:00 2001 From: Harry Maclean Date: Wed, 3 Aug 2022 23:33:54 +1200 Subject: [PATCH 584/819] Delegate to inner type for encode_format In the ToSql impls for &T and Option, override encode_format to delegate to the impl for T. This ensures that if T overrides this method, it also overrides it for &T and Option. --- postgres-types/src/lib.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index e912b378d..8f758e974 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -868,6 +868,10 @@ where T::accepts(ty) } + fn encode_format(&self) -> Format { + (*self).encode_format() + } + to_sql_checked!(); } @@ -887,6 +891,13 @@ impl ToSql for Option { ::accepts(ty) } + fn encode_format(&self) -> Format { + match self { + Some(ref val) => val.encode_format(), + None => Format::Binary, + } + } + to_sql_checked!(); } From 17da49912d923db0fdb8b152a09eca748806c289 Mon Sep 17 00:00:00 2001 From: Harry Maclean Date: Wed, 3 Aug 2022 23:49:19 +1200 Subject: [PATCH 585/819] Impl Clone + Copy for Format --- postgres-types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 8f758e974..463e3deec 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -844,7 +844,7 @@ pub trait ToSql: fmt::Debug { /// Supported Postgres message format types /// /// Using Text format in a message assumes a Postgres `SERVER_ENCODING` of `UTF8` -#[derive(Debug)] +#[derive(Clone, Copy, Debug)] pub enum Format { /// Text format (UTF-8) Text, From a624282bed658a53e7e09d194972a025c82dc3e0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 4 Aug 2022 13:07:13 -0400 Subject: [PATCH 586/819] rustfmt --- tokio-postgres/src/query.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 965a21a48..dd0bd0a25 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -158,7 +158,7 @@ where { let (param_formats, params): (Vec<_>, Vec<_>) = params .into_iter() - .map(|p| { (p.borrow_to_sql().encode_format() as i16, p) }) + .map(|p| (p.borrow_to_sql().encode_format() as i16, p)) .unzip(); let params = params.into_iter(); From 65c1d146a6b3b91652f211d8c72e776164ee87a4 Mon Sep 17 00:00:00 2001 From: Basti Ortiz <39114273+Some-Dood@users.noreply.github.com> Date: Sun, 14 Aug 2022 23:31:17 +0800 Subject: [PATCH 587/819] Chore(tokio-postgres): prefer sub-crates of `futures` --- tokio-postgres/Cargo.toml | 8 +++++--- tokio-postgres/benches/bench.rs | 7 +++---- tokio-postgres/src/binary_copy.rs | 2 +- tokio-postgres/src/client.rs | 6 +++--- tokio-postgres/src/connect.rs | 2 +- tokio-postgres/src/connect_raw.rs | 4 ++-- tokio-postgres/src/connection.rs | 5 ++--- tokio-postgres/src/copy_in.rs | 5 ++--- tokio-postgres/src/copy_out.rs | 2 +- tokio-postgres/src/lib.rs | 2 +- tokio-postgres/src/prepare.rs | 2 
+- tokio-postgres/src/query.rs | 2 +- tokio-postgres/src/simple_query.rs | 2 +- tokio-postgres/src/transaction.rs | 2 +- tokio-postgres/tests/test/binary_copy.rs | 2 +- tokio-postgres/tests/test/main.rs | 4 ++-- tokio-postgres/tests/test/runtime.rs | 2 +- 17 files changed, 29 insertions(+), 30 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index b3b7cf825..ecd6c7faa 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -45,7 +45,8 @@ async-trait = "0.1" bytes = "1.0" byteorder = "1.0" fallible-iterator = "0.2" -futures = "0.3" +futures-channel = { version = "0.3", features = ["sink"] } +futures-util = { version = "0.3", features = ["sink"] } log = "0.4" parking_lot = "0.12" percent-encoding = "2.0" @@ -58,9 +59,10 @@ tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } [dev-dependencies] -tokio = { version = "1.0", features = ["full"] } -env_logger = "0.9" +futures-executor = "0.3" criterion = "0.3" +env_logger = "0.9" +tokio = { version = "1.0", features = ["macros", "rt"] } bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } diff --git a/tokio-postgres/benches/bench.rs b/tokio-postgres/benches/bench.rs index fececa2b5..a8f9b5f1a 100644 --- a/tokio-postgres/benches/bench.rs +++ b/tokio-postgres/benches/bench.rs @@ -1,6 +1,5 @@ use criterion::{criterion_group, criterion_main, Criterion}; -use futures::channel::oneshot; -use futures::executor; +use futures_channel::oneshot; use std::sync::Arc; use std::time::Instant; use tokio::runtime::Runtime; @@ -32,7 +31,7 @@ fn query_prepared(c: &mut Criterion) { let (client, runtime) = setup(); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("executor_block_on", move |b| { - b.iter(|| executor::block_on(client.query(&statement, &[&1i64])).unwrap()) + b.iter(|| futures_executor::block_on(client.query(&statement, &[&1i64])).unwrap()) }); let (client, runtime) = setup(); @@ -50,7 +49,7 @@ fn query_prepared(c: &mut Criterion) { } tx.send(start.elapsed()).unwrap(); }); - executor::block_on(rx).unwrap() + futures_executor::block_on(rx).unwrap() }) }); } diff --git a/tokio-postgres/src/binary_copy.rs b/tokio-postgres/src/binary_copy.rs index 3b1e13cd7..dab141663 100644 --- a/tokio-postgres/src/binary_copy.rs +++ b/tokio-postgres/src/binary_copy.rs @@ -4,7 +4,7 @@ use crate::types::{FromSql, IsNull, ToSql, Type, WrongType}; use crate::{slice_iter, CopyInSink, CopyOutStream, Error}; use byteorder::{BigEndian, ByteOrder}; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use futures::{ready, SinkExt, Stream}; +use futures_util::{ready, SinkExt, Stream}; use pin_project_lite::pin_project; use postgres_types::BorrowToSql; use std::convert::TryFrom; diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 42d507fda..8444ff56a 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -18,8 +18,8 @@ use crate::{ }; use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; -use futures::channel::mpsc; -use futures::{future, pin_mut, ready, StreamExt, TryStreamExt}; +use futures_channel::mpsc; +use futures_util::{future, pin_mut, ready, StreamExt, TryStreamExt}; use parking_lot::Mutex; use postgres_protocol::message::{backend::Message, frontend}; use postgres_types::BorrowToSql; @@ -341,7 +341,7 @@ impl Client { /// ```no_run /// # async fn async_main(client: &tokio_postgres::Client) -> 
Result<(), tokio_postgres::Error> { /// use tokio_postgres::types::ToSql; - /// use futures::{pin_mut, TryStreamExt}; + /// use futures_util::{pin_mut, TryStreamExt}; /// /// let params: Vec = vec![ /// "first param".into(), diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index f529ddbbb..4336f90be 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -4,7 +4,7 @@ use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; -use futures::{future, pin_mut, Future, FutureExt, Stream}; +use futures_util::{future, pin_mut, Future, FutureExt, Stream}; use std::io; use std::task::Poll; diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 3c6658481..d97636221 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -6,8 +6,8 @@ use crate::tls::{TlsConnect, TlsStream}; use crate::{Client, Connection, Error}; use bytes::BytesMut; use fallible_iterator::FallibleIterator; -use futures::channel::mpsc; -use futures::{ready, Sink, SinkExt, Stream, TryStreamExt}; +use futures_channel::mpsc; +use futures_util::{ready, Sink, SinkExt, Stream, TryStreamExt}; use postgres_protocol::authentication; use postgres_protocol::authentication::sasl; use postgres_protocol::authentication::sasl::ScramSha256; diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index b6805f76c..30be4e834 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -5,9 +5,8 @@ use crate::maybe_tls_stream::MaybeTlsStream; use crate::{AsyncMessage, Error, Notification}; use bytes::BytesMut; use fallible_iterator::FallibleIterator; -use futures::channel::mpsc; -use futures::stream::FusedStream; -use futures::{ready, Sink, Stream, StreamExt}; +use futures_channel::mpsc; +use futures_util::{ready, stream::FusedStream, Sink, Stream, StreamExt}; use log::{info, trace}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index bc90e5277..de1da933b 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -3,9 +3,8 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::{query, slice_iter, Error, Statement}; use bytes::{Buf, BufMut, BytesMut}; -use futures::channel::mpsc; -use futures::future; -use futures::{ready, Sink, SinkExt, Stream, StreamExt}; +use futures_channel::mpsc; +use futures_util::{future, ready, Sink, SinkExt, Stream, StreamExt}; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 52691b963..1e6949252 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -3,7 +3,7 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::{query, slice_iter, Error, Statement}; use bytes::Bytes; -use futures::{ready, Stream}; +use futures_util::{ready, Stream}; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index b486a9338..4056819fd 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -69,7 +69,7 @@ //! combinator): //! //! ```rust -//! 
use futures::future; +//! use futures_util::future; //! use std::future::Future; //! use tokio_postgres::{Client, Error, Statement}; //! diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 7a6163415..e3f09a7c2 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -7,7 +7,7 @@ use crate::{query, slice_iter}; use crate::{Column, Error, Statement}; use bytes::Bytes; use fallible_iterator::FallibleIterator; -use futures::{pin_mut, TryStreamExt}; +use futures_util::{pin_mut, TryStreamExt}; use log::debug; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index dd0bd0a25..f1593dbc4 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -4,7 +4,7 @@ use crate::connection::RequestMessages; use crate::types::{BorrowToSql, IsNull}; use crate::{Error, Portal, Row, Statement}; use bytes::{Bytes, BytesMut}; -use futures::{ready, Stream}; +use futures_util::{ready, Stream}; use log::{debug, log_enabled, Level}; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index ade2e1d6d..19cb10236 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -4,7 +4,7 @@ use crate::connection::RequestMessages; use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use bytes::Bytes; use fallible_iterator::FallibleIterator; -use futures::{ready, Stream}; +use futures_util::{ready, Stream}; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index b72b119bf..96a324652 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -13,7 +13,7 @@ use crate::{ SimpleQueryMessage, Statement, ToStatement, }; use bytes::Buf; -use futures::TryStreamExt; +use futures_util::TryStreamExt; use postgres_protocol::message::frontend; use tokio::io::{AsyncRead, AsyncWrite}; diff --git a/tokio-postgres/tests/test/binary_copy.rs b/tokio-postgres/tests/test/binary_copy.rs index ab69742dc..94b96ab85 100644 --- a/tokio-postgres/tests/test/binary_copy.rs +++ b/tokio-postgres/tests/test/binary_copy.rs @@ -1,5 +1,5 @@ use crate::connect; -use futures::{pin_mut, TryStreamExt}; +use futures_util::{pin_mut, TryStreamExt}; use tokio_postgres::binary_copy::{BinaryCopyInWriter, BinaryCopyOutStream}; use tokio_postgres::types::Type; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index dcfbc5308..0ab4a7bab 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,8 +1,8 @@ #![warn(rust_2018_idioms)] use bytes::{Bytes, BytesMut}; -use futures::channel::mpsc; -use futures::{ +use futures_channel::mpsc; +use futures_util::{ future, join, pin_mut, stream, try_join, Future, FutureExt, SinkExt, StreamExt, TryStreamExt, }; use pin_project_lite::pin_project; diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index b088d6c98..67b4ead8a 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,4 +1,4 @@ -use futures::{join, FutureExt}; +use futures_util::{join, FutureExt}; use std::time::Duration; use tokio::time; use tokio_postgres::error::SqlState; From aeb8fe0df011f289faedb773ce8acab69809ff14 Mon Sep 17 00:00:00 
2001 From: Basti Ortiz <39114273+Some-Dood@users.noreply.github.com> Date: Sun, 14 Aug 2022 23:43:12 +0800 Subject: [PATCH 588/819] Chore: remove unused `futures` crate for `openssl` and `native-tls` --- postgres-native-tls/Cargo.toml | 4 ++-- postgres-native-tls/src/test.rs | 2 +- postgres-openssl/Cargo.toml | 4 ++-- postgres-openssl/src/test.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 8180cd012..1f2f6385d 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures = "0.3" native-tls = "0.2" tokio = "1.0" tokio-native-tls = "0.3" tokio-postgres = { version = "0.7.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { version = "1.0", features = ["full"] } +futures-util = "0.3" +tokio = { version = "1.0", features = ["macros", "net", "rt"] } postgres = { version = "0.19.0", path = "../postgres" } diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 7a50bc672..25cc6fdbd 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -1,4 +1,4 @@ -use futures::FutureExt; +use futures_util::FutureExt; use native_tls::{self, Certificate}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 5738e74d2..8671308af 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -16,12 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -futures = "0.3" openssl = "0.10" tokio = "1.0" tokio-openssl = "0.6" tokio-postgres = { version = "0.7.0", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { version = "1.0", features = ["full"] } +futures-util = "0.3" +tokio = { version = "1.0", features = ["macros", "net", "rt"] } postgres = { version = "0.19.0", path = "../postgres" } diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index 15ed90ad5..b361ee446 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -1,4 +1,4 @@ -use futures::FutureExt; +use futures_util::FutureExt; use openssl::ssl::{SslConnector, SslMethod}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; From 8abc3eaa658c062bdf96866cd48eef22ab11a8b9 Mon Sep 17 00:00:00 2001 From: Basti Ortiz <39114273+Some-Dood@users.noreply.github.com> Date: Sun, 14 Aug 2022 23:49:40 +0800 Subject: [PATCH 589/819] Chore(postgres): prefer sub-crates of `futures` --- postgres/Cargo.toml | 3 ++- postgres/src/binary_copy.rs | 2 +- postgres/src/connection.rs | 3 +-- postgres/src/copy_in_writer.rs | 2 +- postgres/src/copy_out_reader.rs | 2 +- postgres/src/notifications.rs | 2 +- postgres/src/row_iter.rs | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index be59df101..15b1b32eb 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -37,7 +37,7 @@ with-time-0_3 = ["tokio-postgres/with-time-0_3"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -futures = "0.3" +futures-util = { version = "0.3", features = ["sink"] } tokio-postgres = { version = "0.7.6", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } @@ -45,3 +45,4 @@ log = "0.4" [dev-dependencies] criterion = "0.3" +tokio = { version = "1.0", features = ["rt-multi-thread"] } 
diff --git a/postgres/src/binary_copy.rs b/postgres/src/binary_copy.rs index 98ae666b7..1c4eb7d3b 100644 --- a/postgres/src/binary_copy.rs +++ b/postgres/src/binary_copy.rs @@ -4,7 +4,7 @@ use crate::connection::ConnectionRef; use crate::types::{BorrowToSql, ToSql, Type}; use crate::{CopyInWriter, CopyOutReader, Error}; use fallible_iterator::FallibleIterator; -use futures::StreamExt; +use futures_util::StreamExt; use std::pin::Pin; #[doc(inline)] pub use tokio_postgres::binary_copy::BinaryCopyOutRow; diff --git a/postgres/src/connection.rs b/postgres/src/connection.rs index 453eef3a6..b91c16555 100644 --- a/postgres/src/connection.rs +++ b/postgres/src/connection.rs @@ -1,6 +1,5 @@ use crate::{Error, Notification}; -use futures::future; -use futures::{pin_mut, Stream}; +use futures_util::{future, pin_mut, Stream}; use std::collections::VecDeque; use std::future::Future; use std::ops::{Deref, DerefMut}; diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs index c996ed857..7de663637 100644 --- a/postgres/src/copy_in_writer.rs +++ b/postgres/src/copy_in_writer.rs @@ -1,7 +1,7 @@ use crate::connection::ConnectionRef; use crate::lazy_pin::LazyPin; use bytes::{Bytes, BytesMut}; -use futures::SinkExt; +use futures_util::SinkExt; use std::io; use std::io::Write; use tokio_postgres::{CopyInSink, Error}; diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index e8b478d49..828b71873 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -1,7 +1,7 @@ use crate::connection::ConnectionRef; use crate::lazy_pin::LazyPin; use bytes::{Buf, Bytes}; -use futures::StreamExt; +use futures_util::StreamExt; use std::io::{self, BufRead, Read}; use tokio_postgres::CopyOutStream; diff --git a/postgres/src/notifications.rs b/postgres/src/notifications.rs index ea44c31f8..c31d4f631 100644 --- a/postgres/src/notifications.rs +++ b/postgres/src/notifications.rs @@ -3,7 +3,7 @@ use crate::connection::ConnectionRef; use crate::{Error, Notification}; use fallible_iterator::FallibleIterator; -use futures::{ready, FutureExt}; +use futures_util::{ready, FutureExt}; use std::pin::Pin; use std::task::Poll; use std::time::Duration; diff --git a/postgres/src/row_iter.rs b/postgres/src/row_iter.rs index 3cd41b900..772e9893c 100644 --- a/postgres/src/row_iter.rs +++ b/postgres/src/row_iter.rs @@ -1,6 +1,6 @@ use crate::connection::ConnectionRef; use fallible_iterator::FallibleIterator; -use futures::StreamExt; +use futures_util::StreamExt; use std::pin::Pin; use tokio_postgres::{Error, Row, RowStream}; From a0f028a008cf112ddafd808efbf998a0f2cbbdc1 Mon Sep 17 00:00:00 2001 From: Basti Ortiz <39114273+Some-Dood@users.noreply.github.com> Date: Mon, 15 Aug 2022 00:01:12 +0800 Subject: [PATCH 590/819] Fix(tokio-postgres): declare dependency on `net` feature --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index ecd6c7faa..57af56765 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -62,7 +62,7 @@ tokio-util = { version = "0.7", features = ["codec"] } futures-executor = "0.3" criterion = "0.3" env_logger = "0.9" -tokio = { version = "1.0", features = ["macros", "rt"] } +tokio = { version = "1.0", features = ["macros", "net", "rt", "rt-multi-thread", "time"] } bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } From 5f3e7aecad05f49a6501e53d10f0cef06f5c72cd 
Mon Sep 17 00:00:00 2001 From: Basti Ortiz <39114273+Some-Dood@users.noreply.github.com> Date: Mon, 15 Aug 2022 00:12:18 +0800 Subject: [PATCH 591/819] Fix: address Clippy warnings --- postgres-openssl/src/lib.rs | 4 +++- postgres-types/src/special.rs | 4 ++-- tokio-postgres/src/config.rs | 14 +++++++------- tokio-postgres/src/connect.rs | 2 +- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 49fc2807c..9fc50b894 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -73,6 +73,8 @@ use tokio_postgres::tls::{ChannelBinding, TlsConnect}; #[cfg(test)] mod test; +type ConfigCallback = dyn Fn(&mut ConnectConfiguration, &str) -> Result<(), ErrorStack> + Sync + Send; + /// A `MakeTlsConnect` implementation using the `openssl` crate. /// /// Requires the `runtime` Cargo feature (enabled by default). @@ -80,7 +82,7 @@ mod test; #[derive(Clone)] pub struct MakeTlsConnector { connector: SslConnector, - config: Arc Result<(), ErrorStack> + Sync + Send>, + config: Arc, } #[cfg(feature = "runtime")] diff --git a/postgres-types/src/special.rs b/postgres-types/src/special.rs index 8579885ef..1a865287e 100644 --- a/postgres-types/src/special.rs +++ b/postgres-types/src/special.rs @@ -6,7 +6,7 @@ use std::{i32, i64}; use crate::{FromSql, IsNull, ToSql, Type}; /// A wrapper that can be used to represent infinity with `Type::Date` types. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Date { /// Represents `infinity`, a date that is later than all other dates. PosInfinity, @@ -55,7 +55,7 @@ impl ToSql for Date { /// A wrapper that can be used to represent infinity with `Type::Timestamp` and `Type::Timestamptz` /// types. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Timestamp { /// Represents `infinity`, a timestamp that is later than all other timestamps. PosInfinity, diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index c026cca4f..2c29d629c 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -23,7 +23,7 @@ use std::{error, fmt, iter, mem}; use tokio::io::{AsyncRead, AsyncWrite}; /// Properties required of a session. -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[non_exhaustive] pub enum TargetSessionAttrs { /// No special properties are required. @@ -33,7 +33,7 @@ pub enum TargetSessionAttrs { } /// TLS configuration. -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[non_exhaustive] pub enum SslMode { /// Do not use TLS. @@ -45,7 +45,7 @@ pub enum SslMode { } /// Channel binding configuration. -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[non_exhaustive] pub enum ChannelBinding { /// Do not use channel binding. @@ -57,7 +57,7 @@ pub enum ChannelBinding { } /// A host specification. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum Host { /// A TCP hostname. 
Tcp(String), @@ -144,7 +144,7 @@ pub enum Host { /// ```not_rust /// postgresql:///mydb?user=user&host=/var/lib/postgresql /// ``` -#[derive(PartialEq, Clone)] +#[derive(Clone, PartialEq, Eq)] pub struct Config { pub(crate) user: Option, pub(crate) password: Option>, @@ -452,7 +452,7 @@ impl Config { } } "target_session_attrs" => { - let target_session_attrs = match &*value { + let target_session_attrs = match value { "any" => TargetSessionAttrs::Any, "read-write" => TargetSessionAttrs::ReadWrite, _ => { @@ -900,7 +900,7 @@ impl<'a> UrlParser<'a> { #[cfg(unix)] fn host_param(&mut self, s: &str) -> Result<(), Error> { let decoded = Cow::from(percent_encoding::percent_decode(s.as_bytes())); - if decoded.get(0) == Some(&b'/') { + if decoded.first() == Some(&b'/') { self.config.host_path(OsStr::from_bytes(&decoded)); } else { let decoded = str::from_utf8(&decoded).map_err(|e| Error::config_parse(Box::new(e)))?; diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 4336f90be..88faafe6b 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -28,7 +28,7 @@ where let port = config .port .get(i) - .or_else(|| config.port.get(0)) + .or_else(|| config.port.first()) .copied() .unwrap_or(5432); From 44eac98271cc6c47215fb44d995695a986dfad70 Mon Sep 17 00:00:00 2001 From: Basti Ortiz <39114273+Some-Dood@users.noreply.github.com> Date: Mon, 15 Aug 2022 00:14:55 +0800 Subject: [PATCH 592/819] Fix: run `cargo fmt` --- postgres-openssl/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 9fc50b894..837663fe7 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -73,7 +73,8 @@ use tokio_postgres::tls::{ChannelBinding, TlsConnect}; #[cfg(test)] mod test; -type ConfigCallback = dyn Fn(&mut ConnectConfiguration, &str) -> Result<(), ErrorStack> + Sync + Send; +type ConfigCallback = + dyn Fn(&mut ConnectConfiguration, &str) -> Result<(), ErrorStack> + Sync + Send; /// A `MakeTlsConnect` implementation using the `openssl` crate. 
/// From 569689deaa8277e5ae8f6f28ea0b733c43a4d141 Mon Sep 17 00:00:00 2001 From: Alex Pearson Date: Mon, 15 Aug 2022 12:21:20 -0400 Subject: [PATCH 593/819] encode format with types --- postgres-types/src/lib.rs | 10 +++++----- tokio-postgres/src/query.rs | 10 ++++++---- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 463e3deec..5c5a0d023 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -836,7 +836,7 @@ pub trait ToSql: fmt::Debug { ) -> Result>; /// Specify the encode format - fn encode_format(&self) -> Format { + fn encode_format(&self, _ty: &Type) -> Format { Format::Binary } } @@ -868,8 +868,8 @@ where T::accepts(ty) } - fn encode_format(&self) -> Format { - (*self).encode_format() + fn encode_format(&self, ty: &Type) -> Format { + (*self).encode_format(ty) } to_sql_checked!(); @@ -891,9 +891,9 @@ impl ToSql for Option { ::accepts(ty) } - fn encode_format(&self) -> Format { + fn encode_format(&self, ty: &Type) -> Format { match self { - Some(ref val) => val.encode_format(), + Some(ref val) => val.encode_format(ty), None => Format::Binary, } } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index f1593dbc4..5f8cbf7c0 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -156,16 +156,18 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { + let param_types = statement.params(); let (param_formats, params): (Vec<_>, Vec<_>) = params .into_iter() - .map(|p| (p.borrow_to_sql().encode_format() as i16, p)) + .zip(param_types.iter()) + .map(|(p, ty)| (p.borrow_to_sql().encode_format(ty) as i16, p)) .unzip(); let params = params.into_iter(); assert!( - statement.params().len() == params.len(), + param_types.len() == params.len(), "expected {} parameters but got {}", - statement.params().len(), + param_types.len(), params.len() ); @@ -174,7 +176,7 @@ where portal, statement.name(), param_formats, - params.zip(statement.params()).enumerate(), + params.zip(param_types).enumerate(), |(idx, (param, ty)), buf| match param.borrow_to_sql().to_sql_checked(ty, buf) { Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), From 8158eed052f653ffecf6921066ed3b7e5860ee5d Mon Sep 17 00:00:00 2001 From: Alex Pearson Date: Mon, 15 Aug 2022 20:54:11 -0400 Subject: [PATCH 594/819] Move parameter count assertion above format encoding derivation --- tokio-postgres/src/query.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 5f8cbf7c0..71db8769a 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -157,11 +157,6 @@ where I::IntoIter: ExactSizeIterator, { let param_types = statement.params(); - let (param_formats, params): (Vec<_>, Vec<_>) = params - .into_iter() - .zip(param_types.iter()) - .map(|(p, ty)| (p.borrow_to_sql().encode_format(ty) as i16, p)) - .unzip(); let params = params.into_iter(); assert!( @@ -171,6 +166,13 @@ where params.len() ); + let (param_formats, params): (Vec<_>, Vec<_>) = params + .zip(param_types.iter()) + .map(|(p, ty)| (p.borrow_to_sql().encode_format(ty) as i16, p)) + .unzip(); + + let params = params.into_iter(); + let mut error_idx = 0; let r = frontend::bind( portal, From d6a6e9db830bc22a97e4ec2f1b0cde3c3a45e6c0 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 20 Aug 2022 15:18:19 -0400 Subject: [PATCH 595/819] Fix smol_str feature --- 
postgres-types/Cargo.toml | 4 ++-- postgres-types/src/lib.rs | 2 +- postgres/Cargo.toml | 1 + tokio-postgres/Cargo.toml | 10 +++++++++- tokio-postgres/tests/test/types/mod.rs | 2 ++ tokio-postgres/tests/test/types/smol_str_01.rs | 18 ++++++++++++++++++ 6 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 tokio-postgres/tests/test/types/smol_str_01.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index d84641866..70f1ed54a 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.2.3" +version = "0.2.4" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -21,6 +21,7 @@ with-eui48-1 = ["eui48-1"] with-geo-types-0_6 = ["geo-types-06"] with-geo-types-0_7 = ["geo-types-0_7"] with-serde_json-1 = ["serde-1", "serde_json-1"] +with-smol_str-01 = ["smol_str-01"] with-uuid-0_8 = ["uuid-08"] with-uuid-1 = ["uuid-1"] with-time-0_2 = ["time-02"] @@ -48,5 +49,4 @@ uuid-08 = { version = "0.8", package = "uuid", optional = true } uuid-1 = { version = "1.0", package = "uuid", optional = true } time-02 = { version = "0.2", package = "time", optional = true } time-03 = { version = "0.3", package = "time", default-features = false, optional = true } - smol_str-01 = { version = "0.1.23", package = "smol_str", default-features = false, optional = true } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 5c5a0d023..f5d841cd1 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -224,7 +224,7 @@ mod geo_types_06; mod geo_types_07; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; -#[cfg(feature = "smol_str-01")] +#[cfg(feature = "with-smol_str-01")] mod smol_str_01; #[cfg(feature = "with-time-0_2")] mod time_02; diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 15b1b32eb..b7f0e2ca5 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -29,6 +29,7 @@ with-eui48-1 = ["tokio-postgres/with-eui48-1"] with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] +with-smol_str-01 = ["tokio-postgres/with-smol_str-01"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] with-uuid-1 = ["tokio-postgres/with-uuid-1"] with-time-0_2 = ["tokio-postgres/with-time-0_2"] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 57af56765..b2190c9af 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -35,6 +35,7 @@ with-eui48-1 = ["postgres-types/with-eui48-1"] with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] +with-smol_str-01 = ["postgres-types/with-smol_str-01"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] with-uuid-1 = ["postgres-types/with-uuid-1"] with-time-0_2 = ["postgres-types/with-time-0_2"] @@ -62,7 +63,13 @@ tokio-util = { version = "0.7", features = ["codec"] } futures-executor = "0.3" criterion = "0.3" env_logger = "0.9" -tokio = { version = "1.0", features = ["macros", "net", "rt", "rt-multi-thread", "time"] } +tokio = { version = "1.0", features = [ + "macros", + "net", + "rt", + "rt-multi-thread", + "time", +] } bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } @@ -72,6 +79,7 @@ geo-types-06 = { version = "0.6", package = "geo-types" } geo-types-07 = { version = 
"0.7", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } +smol_str-01 = { version = "0.1", package = "smol_str" } uuid-08 = { version = "0.8", package = "uuid" } uuid-1 = { version = "1.0", package = "uuid" } time-02 = { version = "0.2", package = "time" } diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index e3fd663a5..452d149fe 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -27,6 +27,8 @@ mod geo_types_06; mod geo_types_07; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; +#[cfg(feature = "with-smol_str-01")] +mod smol_str_01; #[cfg(feature = "with-time-0_2")] mod time_02; #[cfg(feature = "with-time-0_3")] diff --git a/tokio-postgres/tests/test/types/smol_str_01.rs b/tokio-postgres/tests/test/types/smol_str_01.rs new file mode 100644 index 000000000..843d486b9 --- /dev/null +++ b/tokio-postgres/tests/test/types/smol_str_01.rs @@ -0,0 +1,18 @@ +use smol_str_01::SmolStr; + +use crate::types::test_type; + +#[tokio::test] +async fn test_smol_str() { + test_type( + "VARCHAR", + &[ + (Some(SmolStr::new("hello world")), "'hello world'"), + ( + Some(SmolStr::new("イロハニホヘト チリヌルヲ")), + "'イロハニホヘト チリヌルヲ'", + ), + (None, "NULL"), + ] + ).await; +} From 34efb053ec41b97d687395a94081651a546c4a5c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 20 Aug 2022 15:19:17 -0400 Subject: [PATCH 596/819] rustfmt --- tokio-postgres/tests/test/types/smol_str_01.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/tests/test/types/smol_str_01.rs b/tokio-postgres/tests/test/types/smol_str_01.rs index 843d486b9..105a2c957 100644 --- a/tokio-postgres/tests/test/types/smol_str_01.rs +++ b/tokio-postgres/tests/test/types/smol_str_01.rs @@ -13,6 +13,7 @@ async fn test_smol_str() { "'イロハニホヘト チリヌルヲ'", ), (None, "NULL"), - ] - ).await; + ], + ) + .await; } From 5b95068ee6e644c5234d3663d2d6156baa1747ae Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 20 Aug 2022 15:28:42 -0400 Subject: [PATCH 597/819] Release postgres-types v0.2.4 --- postgres-types/CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index d2284dbd5..0e2167e5f 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,14 @@ # Change Log +## v0.2.4 - 2022-08-20 + +## Added + +* Added `ToSql` and `FromSql` implementations for `Box<[T]>`. +* Added `ToSql` and `FromSql` implementations for `[u8; N]` via the `array-impls` feature. +* Added support for `smol_str` 0.1 via the `with-smol_str-01` feature. +* Added `ToSql::encode_format` to support text encodings of parameters. + ## v0.2.3 - 2022-04-30 ### Added From 8e4484c005499883882e5f9584988e0eceac4eca Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Aug 2022 15:34:36 -0400 Subject: [PATCH 598/819] Release tokio-postgres v0.7.7 --- tokio-postgres/CHANGELOG.md | 8 ++++++++ tokio-postgres/Cargo.toml | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index b294aec69..91e78b780 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,13 @@ # Change Log +## v0.7.7 - 2022-08-21 + +## Added + +* Added `ToSql` and `FromSql` implementations for `[u8; N]` via the `array-impls` feature. +* Added support for `smol_str` 0.1 via the `with-smol_str-01` feature. 
+* Added `ToSql::encode_format` to support text encodings of parameters. + ## v0.7.6 - 2022-04-30 ### Added diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index b2190c9af..1a9638eff 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.6" +version = "0.7.7" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -54,7 +54,7 @@ percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.11" postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } -postgres-types = { version = "0.2.3", path = "../postgres-types" } +postgres-types = { version = "0.2.4", path = "../postgres-types" } socket2 = "0.4" tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } From 54331183eaa6ee57f0dfeb01770b93360544c904 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Aug 2022 15:37:10 -0400 Subject: [PATCH 599/819] Release postgres v0.19.4 --- postgres/CHANGELOG.md | 8 ++++++++ postgres/Cargo.toml | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 5394da803..c467c8b73 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,13 @@ # Change Log +## v0.19.4 - 2022-08-21 + +## Added + +* Added `ToSql` and `FromSql` implementations for `[u8; N]` via the `array-impls` feature. +* Added support for `smol_str` 0.1 via the `with-smol_str-01` feature. +* Added `ToSql::encode_format` to support text encodings of parameters. + ## v0.19.3 - 2022-04-30 ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index b7f0e2ca5..2ca65941b 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.19.3" +version = "0.19.4" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -39,7 +39,7 @@ with-time-0_3 = ["tokio-postgres/with-time-0_3"] bytes = "1.0" fallible-iterator = "0.2" futures-util = { version = "0.3", features = ["sink"] } -tokio-postgres = { version = "0.7.6", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.7", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" From ed056758887464723881c3f6f21b0140746313de Mon Sep 17 00:00:00 2001 From: Matt Duch Date: Mon, 8 Aug 2022 19:22:06 -0500 Subject: [PATCH 600/819] derive generic FromSql/ToSql --- postgres-derive-test/src/composites.rs | 69 +++++++++++++++++++++++++- postgres-derive-test/src/lib.rs | 24 +++++++++ postgres-derive/src/fromsql.rs | 28 +++++++++-- postgres-derive/src/tosql.rs | 3 +- 4 files changed, 116 insertions(+), 8 deletions(-) diff --git a/postgres-derive-test/src/composites.rs b/postgres-derive-test/src/composites.rs index ed60bf48f..667633519 100644 --- a/postgres-derive-test/src/composites.rs +++ b/postgres-derive-test/src/composites.rs @@ -1,6 +1,6 @@ -use crate::test_type; +use crate::{test_type, test_type_asymmetric}; use postgres::{Client, NoTls}; -use postgres_types::{FromSql, ToSql, WrongType}; +use postgres_types::{FromSql, FromSqlOwned, ToSql, WrongType}; use std::error::Error; #[test] @@ -238,3 +238,68 @@ fn raw_ident_field() { test_type(&mut conn, "inventory_item", &[(item, "ROW('foo')")]); } + +#[test] +fn generics() { + #[derive(FromSql, Debug, PartialEq)] + struct InventoryItem + where + U: FromSqlOwned, + { + name: String, + supplier_id: T, + price: Option, + } + + // doesn't make sense to implement derived 
FromSql on a type with borrows + #[derive(ToSql, Debug, PartialEq)] + #[postgres(name = "InventoryItem")] + struct InventoryItemRef<'a, T: 'a + ToSql, U> + where + U: 'a + ToSql, + { + name: &'a str, + supplier_id: &'a T, + price: Option<&'a U>, + } + + const NAME: &str = "foobar"; + const SUPPLIER_ID: i32 = 100; + const PRICE: f64 = 15.50; + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.\"InventoryItem\" AS ( + name TEXT, + supplier_id INT, + price DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItemRef { + name: NAME, + supplier_id: &SUPPLIER_ID, + price: Some(&PRICE), + }; + + let item_null = InventoryItemRef { + name: NAME, + supplier_id: &SUPPLIER_ID, + price: None, + }; + + test_type_asymmetric( + &mut conn, + "\"InventoryItem\"", + &[ + (item, "ROW('foobar', 100, 15.50)"), + (item_null, "ROW('foobar', 100, NULL)"), + ], + |t: &InventoryItemRef, f: &InventoryItem| { + t.name == f.name.as_str() + && t.supplier_id == &f.supplier_id + && t.price == f.price.as_ref() + }, + ); +} diff --git a/postgres-derive-test/src/lib.rs b/postgres-derive-test/src/lib.rs index 279ed1419..8bfd147fb 100644 --- a/postgres-derive-test/src/lib.rs +++ b/postgres-derive-test/src/lib.rs @@ -27,6 +27,30 @@ where } } +pub fn test_type_asymmetric( + conn: &mut Client, + sql_type: &str, + checks: &[(T, S)], + cmp: C, +) where + T: ToSql + Sync, + F: FromSqlOwned, + S: fmt::Display, + C: Fn(&T, &F) -> bool, +{ + for &(ref val, ref repr) in checks.iter() { + let stmt = conn + .prepare(&*format!("SELECT {}::{}", *repr, sql_type)) + .unwrap(); + let result: F = conn.query_one(&stmt, &[]).unwrap().get(0); + assert!(cmp(val, &result)); + + let stmt = conn.prepare(&*format!("SELECT $1::{}", sql_type)).unwrap(); + let result: F = conn.query_one(&stmt, &[val]).unwrap().get(0); + assert!(cmp(val, &result)); + } +} + #[test] fn compile_fail() { trybuild::TestCases::new().compile_fail("src/compile-fail/*.rs"); diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index c89cbb5e2..415343653 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -1,7 +1,10 @@ -use proc_macro2::TokenStream; +use proc_macro2::{Span, TokenStream}; use quote::{format_ident, quote}; use std::iter; -use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; +use syn::{ + Data, DataStruct, DeriveInput, Error, Fields, GenericParam, Generics, Ident, Lifetime, + LifetimeDef, +}; use crate::accepts; use crate::composites::Field; @@ -86,10 +89,13 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { }; let ident = &input.ident; + let (generics, lifetime) = build_generics(&input.generics); + let (impl_generics, _, _) = generics.split_for_impl(); + let (_, ty_generics, where_clause) = input.generics.split_for_impl(); let out = quote! 
{ - impl<'a> postgres_types::FromSql<'a> for #ident { - fn from_sql(_type: &postgres_types::Type, buf: &'a [u8]) - -> std::result::Result<#ident, + impl#impl_generics postgres_types::FromSql<#lifetime> for #ident#ty_generics #where_clause { + fn from_sql(_type: &postgres_types::Type, buf: &#lifetime [u8]) + -> std::result::Result<#ident#ty_generics, std::boxed::Box> { @@ -200,3 +206,15 @@ fn composite_body(ident: &Ident, fields: &[Field]) -> TokenStream { }) } } + +fn build_generics(source: &Generics) -> (Generics, Lifetime) { + let mut out = source.to_owned(); + // don't worry about lifetime name collisions, it doesn't make sense to derive FromSql on a struct with a lifetime + let lifetime = Lifetime::new("'a", Span::call_site()); + out.params.insert( + 0, + GenericParam::Lifetime(LifetimeDef::new(lifetime.to_owned())), + ); + + (out, lifetime) +} diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index 96f261385..299074f8d 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -82,8 +82,9 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { }; let ident = &input.ident; + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); let out = quote! { - impl postgres_types::ToSql for #ident { + impl#impl_generics postgres_types::ToSql for #ident#ty_generics #where_clause { fn to_sql(&self, _type: &postgres_types::Type, buf: &mut postgres_types::private::BytesMut) From a78ce35d44e2c414d8a26074cb3a71c2471797ac Mon Sep 17 00:00:00 2001 From: Hirotaka Azuma Date: Fri, 26 Aug 2022 05:09:00 +0000 Subject: [PATCH 601/819] Support keepalive interval and retries. --- postgres/src/config.rs | 43 +++++++++++++ tokio-postgres/src/cancel_query.rs | 3 +- tokio-postgres/src/client.rs | 4 +- tokio-postgres/src/config.rs | 94 +++++++++++++++++++++++++--- tokio-postgres/src/connect.rs | 14 +++-- tokio-postgres/src/connect_socket.rs | 8 +-- tokio-postgres/src/keepalive.rs | 29 +++++++++ tokio-postgres/src/lib.rs | 1 + tokio-postgres/tests/test/parse.rs | 13 ++++ 9 files changed, 189 insertions(+), 20 deletions(-) create mode 100644 tokio-postgres/src/keepalive.rs diff --git a/postgres/src/config.rs b/postgres/src/config.rs index c8dffa330..f616dc689 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -48,6 +48,10 @@ use tokio_postgres::{Error, Socket}; /// This option is ignored when connecting with Unix sockets. Defaults to on. /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. /// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. +/// * `keepalives_interval` - The time interval between TCP keepalive probes. +/// This option is ignored when connecting with Unix sockets. Available on neither Redox nor Solaris. +/// * `keepalives_retries` - The maximum number of TCP keepalive probes that will be sent before dropping a connection. +/// This option is ignored when connecting with Unix sockets. Available on neither Redox, Solaris nor Windows. /// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that /// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server /// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. @@ -279,6 +283,45 @@ impl Config { self.config.get_keepalives_idle() } + /// Sets the time interval between TCP keepalive probes. 
+ /// On Windows, this sets the value of the tcp_keepalive struct’s keepaliveinterval field. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + /// + /// Available on neither Redox nor Solaris. + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + pub fn keepalives_interval(&mut self, keepalives_interval: Duration) -> &mut Config { + self.config.keepalives_interval(keepalives_interval); + self + } + + /// Gets the time interval between TCP keepalive probes. + /// + /// Available on neither Redox nor Solaris. + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + pub fn get_keepalives_interval(&self) -> Option<&Duration> { + self.config.get_keepalives_interval() + } + + /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + /// + /// Available on neither Redox, Solaris nor Windows. + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + pub fn keepalives_retries(&mut self, keepalives_retries: u32) -> &mut Config { + self.config.keepalives_retries(keepalives_retries); + self + } + + /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. + /// + /// Available on neither Redox, Solaris nor Windows. + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + pub fn get_keepalives_retries(&self) -> Option<&u32> { + self.config.get_keepalives_retries() + } + /// Sets the requirements of the session. /// /// This can be used to connect to the primary server in a clustered database rather than one of the read-only diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index d7bb50474..b02729f85 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -38,8 +38,7 @@ where &config.host, config.port, config.connect_timeout, - config.keepalives, - config.keepalives_idle, + config.keepalive.as_ref(), ) .await?; diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 8444ff56a..1ed3964d5 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -4,6 +4,7 @@ use crate::config::Host; use crate::config::SslMode; use crate::connection::{Request, RequestMessages}; use crate::copy_out::CopyOutStream; +use crate::keepalive::KeepaliveConfig; use crate::query::RowStream; use crate::simple_query::SimpleQueryStream; #[cfg(feature = "runtime")] @@ -154,8 +155,7 @@ pub(crate) struct SocketConfig { pub host: Host, pub port: u16, pub connect_timeout: Option, - pub keepalives: bool, - pub keepalives_idle: Duration, + pub keepalive: Option, } /// An asynchronous PostgreSQL client. diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 2c29d629c..5a0c639e1 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -3,6 +3,7 @@ #[cfg(feature = "runtime")] use crate::connect::connect; use crate::connect_raw::connect_raw; +use crate::keepalive::KeepaliveConfig; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; use crate::tls::TlsConnect; @@ -99,6 +100,10 @@ pub enum Host { /// This option is ignored when connecting with Unix sockets. Defaults to on. /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. /// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. 
+/// * `keepalives_interval` - The time interval between TCP keepalive probes. +/// This option is ignored when connecting with Unix sockets. Available on neither Redox nor Solaris. +/// * `keepalives_retries` - The maximum number of TCP keepalive probes that will be sent before dropping a connection. +/// This option is ignored when connecting with Unix sockets. Available on neither Redox, Solaris nor Windows. /// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that /// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server /// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. @@ -156,7 +161,7 @@ pub struct Config { pub(crate) port: Vec, pub(crate) connect_timeout: Option, pub(crate) keepalives: bool, - pub(crate) keepalives_idle: Duration, + pub(crate) keepalive_config: KeepaliveConfig, pub(crate) target_session_attrs: TargetSessionAttrs, pub(crate) channel_binding: ChannelBinding, } @@ -170,6 +175,13 @@ impl Default for Config { impl Config { /// Creates a new configuration. pub fn new() -> Config { + let keepalive_config = KeepaliveConfig { + idle: Duration::from_secs(2 * 60 * 60), + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + interval: None, + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + retries: None, + }; Config { user: None, password: None, @@ -181,7 +193,7 @@ impl Config { port: vec![], connect_timeout: None, keepalives: true, - keepalives_idle: Duration::from_secs(2 * 60 * 60), + keepalive_config, target_session_attrs: TargetSessionAttrs::Any, channel_binding: ChannelBinding::Prefer, } @@ -347,14 +359,53 @@ impl Config { /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { - self.keepalives_idle = keepalives_idle; + self.keepalive_config.idle = keepalives_idle; self } /// Gets the configured amount of idle time before a keepalive packet will /// be sent on the connection. pub fn get_keepalives_idle(&self) -> Duration { - self.keepalives_idle + self.keepalive_config.idle + } + + /// Sets the time interval between TCP keepalive probes. + /// On Windows, this sets the value of the tcp_keepalive struct’s keepaliveinterval field. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + /// + /// Available on neither Redox nor Solaris. + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + pub fn keepalives_interval(&mut self, keepalives_interval: Duration) -> &mut Config { + self.keepalive_config.interval = Some(keepalives_interval); + self + } + + /// Gets the time interval between TCP keepalive probes. + /// + /// Available on neither Redox nor Solaris. + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + pub fn get_keepalives_interval(&self) -> Option<&Duration> { + self.keepalive_config.interval.as_ref() + } + + /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + /// + /// Available on neither Redox, Solaris nor Windows. 
+ #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + pub fn keepalives_retries(&mut self, keepalives_retries: u32) -> &mut Config { + self.keepalive_config.retries = Some(keepalives_retries); + self + } + + /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. + /// + /// Available on neither Redox, Solaris nor Windows. + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + pub fn get_keepalives_retries(&self) -> Option<&u32> { + self.keepalive_config.retries.as_ref() } /// Sets the requirements of the session. @@ -451,6 +502,22 @@ impl Config { self.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); } } + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + "keepalives_interval" => { + let keepalives_interval = value.parse::().map_err(|_| { + Error::config_parse(Box::new(InvalidValue("keepalives_interval"))) + })?; + if keepalives_interval > 0 { + self.keepalives_interval(Duration::from_secs(keepalives_interval as u64)); + } + } + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + "keepalives_retries" => { + let keepalives_retries = value.parse::().map_err(|_| { + Error::config_parse(Box::new(InvalidValue("keepalives_retries"))) + })?; + self.keepalives_retries(keepalives_retries); + } "target_session_attrs" => { let target_session_attrs = match value { "any" => TargetSessionAttrs::Any, @@ -534,8 +601,8 @@ impl fmt::Debug for Config { } } - f.debug_struct("Config") - .field("user", &self.user) + let mut ds = f.debug_struct("Config"); + ds.field("user", &self.user) .field("password", &self.password.as_ref().map(|_| Redaction {})) .field("dbname", &self.dbname) .field("options", &self.options) @@ -545,8 +612,19 @@ impl fmt::Debug for Config { .field("port", &self.port) .field("connect_timeout", &self.connect_timeout) .field("keepalives", &self.keepalives) - .field("keepalives_idle", &self.keepalives_idle) - .field("target_session_attrs", &self.target_session_attrs) + .field("keepalives_idle", &self.keepalive_config.idle); + + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + { + ds.field("keepalives_interval", &self.keepalive_config.interval); + } + + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + { + ds.field("keepalives_retries", &self.keepalive_config.retries); + } + + ds.field("target_session_attrs", &self.target_session_attrs) .field("channel_binding", &self.channel_binding) .finish() } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 88faafe6b..97a00c812 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -65,8 +65,11 @@ where host, port, config.connect_timeout, - config.keepalives, - config.keepalives_idle, + if config.keepalives { + Some(&config.keepalive_config) + } else { + None + }, ) .await?; let (mut client, mut connection) = connect_raw(socket, tls, config).await?; @@ -115,8 +118,11 @@ where host: host.clone(), port, connect_timeout: config.connect_timeout, - keepalives: config.keepalives, - keepalives_idle: config.keepalives_idle, + keepalive: if config.keepalives { + Some(config.keepalive_config.clone()) + } else { + None + }, }); Ok((client, connection)) diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 474676908..19d01d87a 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -1,4 +1,5 @@ use 
crate::config::Host; +use crate::keepalive::KeepaliveConfig; use crate::{Error, Socket}; use socket2::{SockRef, TcpKeepalive}; use std::future::Future; @@ -13,8 +14,7 @@ pub(crate) async fn connect_socket( host: &Host, port: u16, connect_timeout: Option<Duration>, - keepalives: bool, - keepalives_idle: Duration, + keepalive_config: Option<&KeepaliveConfig>, ) -> Result<Socket, Error> { match host { Host::Tcp(host) => { @@ -35,9 +35,9 @@ }; stream.set_nodelay(true).map_err(Error::connect)?; - if keepalives { + if let Some(keepalive_config) = keepalive_config { SockRef::from(&stream) - .set_tcp_keepalive(&TcpKeepalive::new().with_time(keepalives_idle)) + .set_tcp_keepalive(&TcpKeepalive::from(keepalive_config)) .map_err(Error::connect)?; } diff --git a/tokio-postgres/src/keepalive.rs b/tokio-postgres/src/keepalive.rs new file mode 100644 index 000000000..4b61d2099 --- /dev/null +++ b/tokio-postgres/src/keepalive.rs @@ -0,0 +1,29 @@ +use socket2::TcpKeepalive; +use std::time::Duration; + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct KeepaliveConfig { + pub idle: Duration, + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + pub interval: Option<Duration>, + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + pub retries: Option<u32>, +} + +impl From<&KeepaliveConfig> for TcpKeepalive { + fn from(keepalive_config: &KeepaliveConfig) -> Self { + let mut tcp_keepalive = Self::new().with_time(keepalive_config.idle); + + #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + if let Some(interval) = keepalive_config.interval { + tcp_keepalive = tcp_keepalive.with_interval(interval); + } + + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + if let Some(retries) = keepalive_config.retries { + tcp_keepalive = tcp_keepalive.with_retries(retries); + } + + tcp_keepalive + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 4056819fd..bd4d7b8ce 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -163,6 +163,7 @@ mod copy_in; mod copy_out; pub mod error; mod generic_client; +mod keepalive; mod maybe_tls_stream; mod portal; mod prepare; diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index a7a9625b2..575d962a2 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -36,6 +36,19 @@ fn settings() { ); } +#[test] +#[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] +fn keepalive_settings() { + check( + "keepalives=1 keepalives_idle=15 keepalives_interval=5 keepalives_retries=9", + Config::new() + .keepalives(true) + .keepalives_idle(Duration::from_secs(15)) + .keepalives_interval(Duration::from_secs(5)) + .keepalives_retries(9), + ); +} + #[test] fn url() { check("postgresql://", &Config::new()); From b5a53960b18197f7220199568f33063bb496a08c Mon Sep 17 00:00:00 2001 From: Hirotaka Azuma Date: Fri, 26 Aug 2022 19:27:06 +0000 Subject: [PATCH 602/819] Review fix: Define features regardless of platform and do nothing if not supported.
--- postgres/src/config.rs | 16 ++---------- tokio-postgres/src/config.rs | 41 ++++++------------------------ tokio-postgres/src/keepalive.rs | 2 -- tokio-postgres/tests/test/parse.rs | 1 - 4 files changed, 10 insertions(+), 50 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index f616dc689..7d02affd2 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -49,9 +49,9 @@ use tokio_postgres::{Error, Socket}; /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. /// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. /// * `keepalives_interval` - The time interval between TCP keepalive probes. -/// This option is ignored when connecting with Unix sockets. Available on neither Redox nor Solaris. +/// This option is ignored when connecting with Unix sockets. /// * `keepalives_retries` - The maximum number of TCP keepalive probes that will be sent before dropping a connection. -/// This option is ignored when connecting with Unix sockets. Available on neither Redox, Solaris nor Windows. +/// This option is ignored when connecting with Unix sockets. /// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that /// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server /// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. @@ -287,18 +287,12 @@ impl Config { /// On Windows, this sets the value of the tcp_keepalive struct’s keepaliveinterval field. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. - /// - /// Available on neither Redox nor Solaris. - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] pub fn keepalives_interval(&mut self, keepalives_interval: Duration) -> &mut Config { self.config.keepalives_interval(keepalives_interval); self } /// Gets the time interval between TCP keepalive probes. - /// - /// Available on neither Redox nor Solaris. - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] pub fn get_keepalives_interval(&self) -> Option<&Duration> { self.config.get_keepalives_interval() } @@ -306,18 +300,12 @@ impl Config { /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. - /// - /// Available on neither Redox, Solaris nor Windows. - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] pub fn keepalives_retries(&mut self, keepalives_retries: u32) -> &mut Config { self.config.keepalives_retries(keepalives_retries); self } /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. - /// - /// Available on neither Redox, Solaris nor Windows. - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] pub fn get_keepalives_retries(&self) -> Option<&u32> { self.config.get_keepalives_retries() } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 5a0c639e1..6931e012d 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -101,9 +101,9 @@ pub enum Host { /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. /// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. 
/// * `keepalives_interval` - The time interval between TCP keepalive probes. -/// This option is ignored when connecting with Unix sockets. Available on neither Redox nor Solaris. +/// This option is ignored when connecting with Unix sockets. /// * `keepalives_retries` - The maximum number of TCP keepalive probes that will be sent before dropping a connection. -/// This option is ignored when connecting with Unix sockets. Available on neither Redox, Solaris nor Windows. +/// This option is ignored when connecting with Unix sockets. /// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that /// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server /// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. @@ -177,9 +177,7 @@ impl Config { pub fn new() -> Config { let keepalive_config = KeepaliveConfig { idle: Duration::from_secs(2 * 60 * 60), - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] interval: None, - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] retries: None, }; Config { @@ -373,18 +371,12 @@ impl Config { /// On Windows, this sets the value of the tcp_keepalive struct’s keepaliveinterval field. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. - /// - /// Available on neither Redox nor Solaris. - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] pub fn keepalives_interval(&mut self, keepalives_interval: Duration) -> &mut Config { self.keepalive_config.interval = Some(keepalives_interval); self } /// Gets the time interval between TCP keepalive probes. - /// - /// Available on neither Redox nor Solaris. - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] pub fn get_keepalives_interval(&self) -> Option<&Duration> { self.keepalive_config.interval.as_ref() } @@ -392,18 +384,12 @@ impl Config { /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. - /// - /// Available on neither Redox, Solaris nor Windows. - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] pub fn keepalives_retries(&mut self, keepalives_retries: u32) -> &mut Config { self.keepalive_config.retries = Some(keepalives_retries); self } /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. - /// - /// Available on neither Redox, Solaris nor Windows. 
- #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] pub fn get_keepalives_retries(&self) -> Option<&u32> { self.keepalive_config.retries.as_ref() } @@ -502,7 +488,6 @@ impl Config { self.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); } } - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] "keepalives_interval" => { let keepalives_interval = value.parse::().map_err(|_| { Error::config_parse(Box::new(InvalidValue("keepalives_interval"))) @@ -511,7 +496,6 @@ impl Config { self.keepalives_interval(Duration::from_secs(keepalives_interval as u64)); } } - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] "keepalives_retries" => { let keepalives_retries = value.parse::().map_err(|_| { Error::config_parse(Box::new(InvalidValue("keepalives_retries"))) @@ -601,8 +585,8 @@ impl fmt::Debug for Config { } } - let mut ds = f.debug_struct("Config"); - ds.field("user", &self.user) + f.debug_struct("Config") + .field("user", &self.user) .field("password", &self.password.as_ref().map(|_| Redaction {})) .field("dbname", &self.dbname) .field("options", &self.options) @@ -612,19 +596,10 @@ impl fmt::Debug for Config { .field("port", &self.port) .field("connect_timeout", &self.connect_timeout) .field("keepalives", &self.keepalives) - .field("keepalives_idle", &self.keepalive_config.idle); - - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] - { - ds.field("keepalives_interval", &self.keepalive_config.interval); - } - - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] - { - ds.field("keepalives_retries", &self.keepalive_config.retries); - } - - ds.field("target_session_attrs", &self.target_session_attrs) + .field("keepalives_idle", &self.keepalive_config.idle) + .field("keepalives_interval", &self.keepalive_config.interval) + .field("keepalives_retries", &self.keepalive_config.retries) + .field("target_session_attrs", &self.target_session_attrs) .field("channel_binding", &self.channel_binding) .finish() } diff --git a/tokio-postgres/src/keepalive.rs b/tokio-postgres/src/keepalive.rs index 4b61d2099..74f453985 100644 --- a/tokio-postgres/src/keepalive.rs +++ b/tokio-postgres/src/keepalive.rs @@ -4,9 +4,7 @@ use std::time::Duration; #[derive(Clone, PartialEq, Eq)] pub(crate) struct KeepaliveConfig { pub idle: Duration, - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] pub interval: Option, - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] pub retries: Option, } diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index 575d962a2..2c11899ca 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -37,7 +37,6 @@ fn settings() { } #[test] -#[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] fn keepalive_settings() { check( "keepalives=1 keepalives_idle=15 keepalives_interval=5 keepalives_retries=9", From b46ea89c42441187d15cd70e651907e29d6338c8 Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Fri, 26 Aug 2022 13:46:04 -0700 Subject: [PATCH 603/819] Add hostaddr support --- tokio-postgres/src/config.rs | 70 +++++++++++++++++++++++++++++++++++ tokio-postgres/src/connect.rs | 23 +++++++++++- 2 files changed, 91 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 2c29d629c..f29eed2b1 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -12,6 +12,7 @@ use 
crate::{Client, Connection, Error}; use std::borrow::Cow; #[cfg(unix)] use std::ffi::OsStr; +use std::ops::Deref; #[cfg(unix)] use std::os::unix::ffi::OsStrExt; #[cfg(unix)] @@ -90,6 +91,17 @@ pub enum Host { /// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting /// with the `connect` method. +/// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, +/// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. +/// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, +/// - or if host specifies an IP address, that value will be used directly. +/// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications +/// with time constraints. However, a host name is required for verify-full SSL certificate verification. +/// Note that `host` is always required regardless of whether `hostaddr` is present. +/// * If `host` is specified without `hostaddr`, a host name lookup occurs; +/// * If both `host` and `hostaddr` are specified, the value for `hostaddr` gives the server network address. +/// The value for `host` is ignored unless the authentication method requires it, +/// in which case it will be used as the host name. /// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be /// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if /// omitted or the empty string. @@ -117,6 +129,10 @@ pub enum Host { /// ``` /// /// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 hostaddr=127.0.0.1,127.0.0.2,127.0.0.3 user=postgres target_session_attrs=read-write +/// ``` +/// +/// ```not_rust /// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write /// ``` /// @@ -153,6 +169,7 @@ pub struct Config { pub(crate) application_name: Option, pub(crate) ssl_mode: SslMode, pub(crate) host: Vec, + pub(crate) hostaddr: Vec, pub(crate) port: Vec, pub(crate) connect_timeout: Option, pub(crate) keepalives: bool, @@ -178,6 +195,7 @@ impl Config { application_name: None, ssl_mode: SslMode::Prefer, host: vec![], + hostaddr: vec![], port: vec![], connect_timeout: None, keepalives: true, @@ -288,6 +306,11 @@ impl Config { &self.host } + /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. + pub fn get_hostaddrs(&self) -> &[String] { + self.hostaddr.deref() + } + /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. @@ -300,6 +323,15 @@ impl Config { self } + /// Adds a hostaddr to the configuration. + /// + /// Multiple hostaddrs can be specified by calling this method multiple times, and each will be tried in order. + /// There must be either no hostaddrs, or the same number of hostaddrs as hosts. + pub fn hostaddr(&mut self, hostaddr: &str) -> &mut Config { + self.hostaddr.push(hostaddr.to_string()); + self + } + /// Adds a port to the configuration. /// /// Multiple ports can be specified by calling this method multiple times. 
There must either be no ports, in which @@ -418,6 +450,11 @@ impl Config { self.host(host); } } + "hostaddr" => { + for hostaddr in value.split(',') { + self.hostaddr(hostaddr); + } + } "port" => { for port in value.split(',') { let port = if port.is_empty() { @@ -542,6 +579,7 @@ impl fmt::Debug for Config { .field("application_name", &self.application_name) .field("ssl_mode", &self.ssl_mode) .field("host", &self.host) + .field("hostaddr", &self.hostaddr) .field("port", &self.port) .field("connect_timeout", &self.connect_timeout) .field("keepalives", &self.keepalives) @@ -922,3 +960,35 @@ impl<'a> UrlParser<'a> { .map_err(|e| Error::config_parse(e.into())) } } + +#[cfg(test)] +mod tests { + use crate::{config::Host, Config}; + + #[test] + fn test_simple_parsing() { + let s = "user=pass_user dbname=postgres host=host1,host2 hostaddr=127.0.0.1,127.0.0.2 port=26257"; + let config = s.parse::().unwrap(); + assert_eq!(Some("pass_user"), config.get_user()); + assert_eq!(Some("postgres"), config.get_dbname()); + assert_eq!( + [ + Host::Tcp("host1".to_string()), + Host::Tcp("host2".to_string()) + ], + config.get_hosts(), + ); + + assert_eq!(["127.0.0.1", "127.0.0.2"], config.get_hostaddrs(),); + + assert_eq!(1, 1); + } + + #[test] + fn test_empty_hostaddrs() { + let s = + "user=pass_user dbname=postgres host=host1,host2,host3 hostaddr=127.0.0.1,,127.0.0.2"; + let config = s.parse::().unwrap(); + assert_eq!(["127.0.0.1", "", "127.0.0.2"], config.get_hostaddrs(),); + } +} diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 88faafe6b..e8ac29b42 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -23,6 +23,15 @@ where return Err(Error::config("invalid number of ports".into())); } + if !config.hostaddr.is_empty() && config.hostaddr.len() != config.host.len() { + let msg = format!( + "invalid number of hostaddrs ({}). Possible values: 0 or number of hosts ({})", + config.hostaddr.len(), + config.host.len(), + ); + return Err(Error::config(msg.into())); + } + let mut error = None; for (i, host) in config.host.iter().enumerate() { let port = config @@ -32,18 +41,28 @@ where .copied() .unwrap_or(5432); + // The value of host is always used as the hostname for TLS validation. let hostname = match host { Host::Tcp(host) => host.as_str(), // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] Host::Unix(_) => "", }; - let tls = tls .make_tls_connect(hostname) .map_err(|e| Error::tls(e.into()))?; - match connect_once(host, port, tls, config).await { + // If both host and hostaddr are specified, the value of hostaddr is used to to establish the TCP connection. + let hostaddr = match host { + Host::Tcp(_hostname) => match config.hostaddr.get(i) { + Some(hostaddr) if hostaddr.is_empty() => Host::Tcp(hostaddr.clone()), + _ => host.clone(), + }, + #[cfg(unix)] + Host::Unix(_v) => host.clone(), + }; + + match connect_once(&hostaddr, port, tls, config).await { Ok((client, connection)) => return Ok((client, connection)), Err(e) => error = Some(e), } From a97e17b51e2e5ebbedae8d5ad501db9fc71e179a Mon Sep 17 00:00:00 2001 From: Hirotaka Azuma Date: Sat, 27 Aug 2022 13:46:22 +0000 Subject: [PATCH 604/819] Fix CI error: Prevent "unused imports" in --no-default-features. 
--- tokio-postgres/src/client.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 1ed3964d5..ad5aa2866 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -4,6 +4,7 @@ use crate::config::Host; use crate::config::SslMode; use crate::connection::{Request, RequestMessages}; use crate::copy_out::CopyOutStream; +#[cfg(feature = "runtime")] use crate::keepalive::KeepaliveConfig; use crate::query::RowStream; use crate::simple_query::SimpleQueryStream; From 0fcd0c26a59420468999571bd418d57b8e2ad754 Mon Sep 17 00:00:00 2001 From: Hirotaka Azuma Date: Sat, 27 Aug 2022 13:56:17 +0000 Subject: [PATCH 605/819] Review fix: get_keepalives_{interval,retries} returns copied Option. --- postgres/src/config.rs | 4 ++-- tokio-postgres/src/config.rs | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 7d02affd2..b541ec846 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -293,7 +293,7 @@ impl Config { } /// Gets the time interval between TCP keepalive probes. - pub fn get_keepalives_interval(&self) -> Option<&Duration> { + pub fn get_keepalives_interval(&self) -> Option<Duration> { self.config.get_keepalives_interval() } @@ -306,7 +306,7 @@ impl Config { } /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. - pub fn get_keepalives_retries(&self) -> Option<&u32> { + pub fn get_keepalives_retries(&self) -> Option<u32> { self.config.get_keepalives_retries() } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 6931e012d..e8d057640 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -377,8 +377,8 @@ impl Config { } /// Gets the time interval between TCP keepalive probes. - pub fn get_keepalives_interval(&self) -> Option<&Duration> { - self.keepalive_config.interval.as_ref() + pub fn get_keepalives_interval(&self) -> Option<Duration> { + self.keepalive_config.interval.as_ref().copied() } /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. @@ -390,8 +390,8 @@ impl Config { } /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. - pub fn get_keepalives_retries(&self) -> Option<&u32> { - self.keepalive_config.retries.as_ref() + pub fn get_keepalives_retries(&self) -> Option<u32> { + self.keepalive_config.retries.as_ref().copied() } /// Sets the requirements of the session. From 4882ecc2de0b02183f4c69249d59178ec5e11f40 Mon Sep 17 00:00:00 2001 From: Hirotaka Azuma Date: Sat, 27 Aug 2022 13:58:31 +0000 Subject: [PATCH 606/819] Review fix: get_connect_timeout returns copied Option. --- postgres/src/config.rs | 2 +- tokio-postgres/src/config.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b541ec846..db8d6613c 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -252,7 +252,7 @@ impl Config { /// Gets the connection timeout, if one has been set with the /// `connect_timeout` method.
- pub fn get_connect_timeout(&self) -> Option<&Duration> { + pub fn get_connect_timeout(&self) -> Option<Duration> { self.config.get_connect_timeout() } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index e8d057640..3a11e99ca 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -336,8 +336,8 @@ impl Config { /// Gets the connection timeout, if one has been set with the /// `connect_timeout` method. - pub fn get_connect_timeout(&self) -> Option<&Duration> { - self.connect_timeout.as_ref() + pub fn get_connect_timeout(&self) -> Option<Duration> { + self.connect_timeout.as_ref().copied() } /// Controls the use of TCP keepalive. From 5551b3e6cdd81c18d5ffb13ee5478fcd0e551475 Mon Sep 17 00:00:00 2001 From: Hirotaka Azuma Date: Sat, 27 Aug 2022 14:18:27 +0000 Subject: [PATCH 607/819] Revert "Review fix: get_connect_timeout returns copied Option." This reverts commit 4882ecc2de0b02183f4c69249d59178ec5e11f40. --- postgres/src/config.rs | 2 +- tokio-postgres/src/config.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index db8d6613c..b541ec846 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -252,7 +252,7 @@ impl Config { /// Gets the connection timeout, if one has been set with the /// `connect_timeout` method. - pub fn get_connect_timeout(&self) -> Option<Duration> { + pub fn get_connect_timeout(&self) -> Option<&Duration> { self.config.get_connect_timeout() } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 3a11e99ca..e8d057640 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -336,8 +336,8 @@ impl Config { /// Gets the connection timeout, if one has been set with the /// `connect_timeout` method. - pub fn get_connect_timeout(&self) -> Option<Duration> { - self.connect_timeout.as_ref().copied() + pub fn get_connect_timeout(&self) -> Option<&Duration> { + self.connect_timeout.as_ref() } /// Controls the use of TCP keepalive. From d7ccbb3d4255fe7906703f1edd4017915095f87d Mon Sep 17 00:00:00 2001 From: Hirotaka Azuma Date: Sat, 27 Aug 2022 14:19:01 +0000 Subject: [PATCH 608/819] Review fix: Avoid redundant function calls. --- tokio-postgres/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index e8d057640..5b364ec06 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -378,7 +378,7 @@ impl Config { /// Gets the time interval between TCP keepalive probes. pub fn get_keepalives_interval(&self) -> Option<Duration> { - self.keepalive_config.interval.as_ref().copied() + self.keepalive_config.interval } /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. @@ -391,7 +391,7 @@ impl Config { /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. pub fn get_keepalives_retries(&self) -> Option<u32> { - self.keepalive_config.retries.as_ref().copied() + self.keepalive_config.retries } /// Sets the requirements of the session.
From 3c9315e3200f5eb99bb5a9b5998aca555951d691 Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sat, 27 Aug 2022 11:40:57 -0700 Subject: [PATCH 609/819] IpAddr + try hostaddr first --- tokio-postgres/src/config.rs | 36 ++++++++++-------- tokio-postgres/src/connect.rs | 61 +++++++++++++++++++------------ tokio-postgres/tests/test/main.rs | 52 ++++++++++++++++++++++++++ 3 files changed, 110 insertions(+), 39 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 0c62b5030..34accdbe8 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -13,6 +13,7 @@ use crate::{Client, Connection, Error}; use std::borrow::Cow; #[cfg(unix)] use std::ffi::OsStr; +use std::net::IpAddr; use std::ops::Deref; #[cfg(unix)] use std::os::unix::ffi::OsStrExt; @@ -98,7 +99,9 @@ pub enum Host { /// - or if host specifies an IP address, that value will be used directly. /// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications /// with time constraints. However, a host name is required for verify-full SSL certificate verification. -/// Note that `host` is always required regardless of whether `hostaddr` is present. +/// Specifically: +/// * If `hostaddr` is specified without `host`, the value for `hostaddr` gives the server network address. +/// The connection attempt will fail if the authentication method requires a host name; /// * If `host` is specified without `hostaddr`, a host name lookup occurs; /// * If both `host` and `hostaddr` are specified, the value for `hostaddr` gives the server network address. /// The value for `host` is ignored unless the authentication method requires it, /// in which case it will be used as the host name. @@ -174,7 +177,7 @@ pub struct Config { pub(crate) application_name: Option<String>, pub(crate) ssl_mode: SslMode, pub(crate) host: Vec<Host>, - pub(crate) hostaddr: Vec<String>, + pub(crate) hostaddr: Vec<IpAddr>, pub(crate) port: Vec<u16>, pub(crate) connect_timeout: Option<Duration>, pub(crate) keepalives: bool, @@ -317,7 +320,7 @@ impl Config { } /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. - pub fn get_hostaddrs(&self) -> &[String] { + pub fn get_hostaddrs(&self) -> &[IpAddr] { self.hostaddr.deref() } @@ -337,8 +340,8 @@ impl Config { /// /// Multiple hostaddrs can be specified by calling this method multiple times, and each will be tried in order. /// There must be either no hostaddrs, or the same number of hostaddrs as hosts.
- pub fn hostaddr(&mut self, hostaddr: &str) -> &mut Config { - self.hostaddr.push(hostaddr.to_string()); + pub fn hostaddr(&mut self, hostaddr: IpAddr) -> &mut Config { + self.hostaddr.push(hostaddr); self } @@ -489,7 +492,10 @@ impl Config { } "hostaddr" => { for hostaddr in value.split(',') { - self.hostaddr(hostaddr); + let addr = hostaddr + .parse() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("hostaddr"))))?; + self.hostaddr(addr); } } "port" => { @@ -1016,6 +1022,8 @@ impl<'a> UrlParser<'a> { #[cfg(test)] mod tests { + use std::net::IpAddr; + use crate::{config::Host, Config}; #[test] @@ -1032,16 +1040,14 @@ mod tests { config.get_hosts(), ); - assert_eq!(["127.0.0.1", "127.0.0.2"], config.get_hostaddrs(),); + assert_eq!( + [ + "127.0.0.1".parse::().unwrap(), + "127.0.0.2".parse::().unwrap() + ], + config.get_hostaddrs(), + ); assert_eq!(1, 1); } - - #[test] - fn test_empty_hostaddrs() { - let s = - "user=pass_user dbname=postgres host=host1,host2,host3 hostaddr=127.0.0.1,,127.0.0.2"; - let config = s.parse::().unwrap(); - assert_eq!(["127.0.0.1", "", "127.0.0.2"], config.get_hostaddrs(),); - } } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index c36677234..ee1dc1c76 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -5,8 +5,8 @@ use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; use futures_util::{future, pin_mut, Future, FutureExt, Stream}; -use std::io; use std::task::Poll; +use std::{cmp, io}; pub async fn connect( mut tls: T, @@ -15,25 +15,35 @@ pub async fn connect( where T: MakeTlsConnect, { - if config.host.is_empty() { - return Err(Error::config("host missing".into())); + if config.host.is_empty() && config.hostaddr.is_empty() { + return Err(Error::config("both host and hostaddr are missing".into())); } - if config.port.len() > 1 && config.port.len() != config.host.len() { - return Err(Error::config("invalid number of ports".into())); - } - - if !config.hostaddr.is_empty() && config.hostaddr.len() != config.host.len() { + if !config.host.is_empty() + && !config.hostaddr.is_empty() + && config.host.len() != config.hostaddr.len() + { let msg = format!( - "invalid number of hostaddrs ({}). Possible values: 0 or number of hosts ({})", - config.hostaddr.len(), + "number of hosts ({}) is different from number of hostaddrs ({})", config.host.len(), + config.hostaddr.len(), ); return Err(Error::config(msg.into())); } + // At this point, either one of the following two scenarios could happen: + // (1) either config.host or config.hostaddr must be empty; + // (2) if both config.host and config.hostaddr are NOT empty; their lengths must be equal. + let num_hosts = cmp::max(config.host.len(), config.hostaddr.len()); + + if config.port.len() > 1 && config.port.len() != num_hosts { + return Err(Error::config("invalid number of ports".into())); + } + let mut error = None; - for (i, host) in config.host.iter().enumerate() { + for i in 0..num_hosts { + let host = config.host.get(i); + let hostaddr = config.hostaddr.get(i); let port = config .port .get(i) @@ -42,27 +52,30 @@ where .unwrap_or(5432); // The value of host is always used as the hostname for TLS validation. 
+ // postgres doesn't support TLS over unix sockets, so the choice for Host::Unix variant here doesn't matter let hostname = match host { - Host::Tcp(host) => host.as_str(), - // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter - #[cfg(unix)] - Host::Unix(_) => "", + Some(Host::Tcp(host)) => host.as_str(), + _ => "", }; let tls = tls .make_tls_connect(hostname) .map_err(|e| Error::tls(e.into()))?; - // If both host and hostaddr are specified, the value of hostaddr is used to to establish the TCP connection. - let hostaddr = match host { - Host::Tcp(_hostname) => match config.hostaddr.get(i) { - Some(hostaddr) if hostaddr.is_empty() => Host::Tcp(hostaddr.clone()), - _ => host.clone(), - }, - #[cfg(unix)] - Host::Unix(_v) => host.clone(), + // Try to use the value of hostaddr to establish the TCP connection, + // fallback to host if hostaddr is not present. + let addr = match hostaddr { + Some(ipaddr) => Host::Tcp(ipaddr.to_string()), + None => { + if let Some(host) = host { + host.clone() + } else { + // This is unreachable. + return Err(Error::config("both host and hostaddr are empty".into())); + } + } }; - match connect_once(&hostaddr, port, tls, config).await { + match connect_once(&addr, port, tls, config).await { Ok((client, connection)) => return Ok((client, connection)), Err(e) => error = Some(e), } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 0ab4a7bab..387c90d7c 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -147,6 +147,58 @@ async fn scram_password_ok() { connect("user=scram_user password=password dbname=postgres").await; } +#[tokio::test] +async fn host_only_ok() { + let _ = tokio_postgres::connect( + "host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_only_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_and_host_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_mismatch() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1,127.0.0.2 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_both_missing() { + let _ = tokio_postgres::connect( + "port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + #[tokio::test] async fn pipelined_prepare() { let client = connect("user=postgres").await; From e30bff65a35d1240f8b920c49569a40563712e5d Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sat, 27 Aug 2022 11:55:11 -0700 Subject: [PATCH 610/819] also update postgres --- postgres/src/config.rs | 33 +++++++++++++++++++++++++++++++++ tokio-postgres/src/config.rs | 1 + 2 files changed, 34 insertions(+) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b541ec846..a754ff91f 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -6,6 +6,7 @@ use crate::connection::Connection; use crate::Client; use log::info; use std::fmt; +use std::net::IpAddr; use std::path::Path; use std::str::FromStr; use std::sync::Arc; @@ -39,6 +40,19 @@ use tokio_postgres::{Error, Socket}; /// 
path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting /// with the `connect` method. +/// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, +/// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. +/// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, +/// - or if host specifies an IP address, that value will be used directly. +/// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications +/// with time constraints. However, a host name is required for verify-full SSL certificate verification. +/// Specifically: +/// * If `hostaddr` is specified without `host`, the value for `hostaddr` gives the server network address. +/// The connection attempt will fail if the authentication method requires a host name; +/// * If `host` is specified without `hostaddr`, a host name lookup occurs; +/// * If both `host` and `hostaddr` are specified, the value for `hostaddr` gives the server network address. +/// The value for `host` is ignored unless the authentication method requires it, +/// in which case it will be used as the host name. /// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be /// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if /// omitted or the empty string. @@ -67,6 +81,10 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 hostaddr=127.0.0.1,127.0.0.2,127.0.0.3 user=postgres target_session_attrs=read-write +/// ``` +/// +/// ```not_rust /// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write /// ``` /// @@ -204,6 +222,7 @@ impl Config { /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. + /// There must be either no hosts, or the same number of hosts as hostaddrs. pub fn host(&mut self, host: &str) -> &mut Config { self.config.host(host); self @@ -214,6 +233,11 @@ impl Config { self.config.get_hosts() } + /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. + pub fn get_hostaddrs(&self) -> &[IpAddr] { + self.config.get_hostaddrs() + } + /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. @@ -226,6 +250,15 @@ impl Config { self } + /// Adds a hostaddr to the configuration. + /// + /// Multiple hostaddrs can be specified by calling this method multiple times, and each will be tried in order. + /// There must be either no hostaddrs, or the same number of hostaddrs as hosts. + pub fn hostaddr(&mut self, hostaddr: IpAddr) -> &mut Config { + self.config.hostaddr(hostaddr); + self + } + /// Adds a port to the configuration. /// /// Multiple ports can be specified by calling this method multiple times. 
There must either be no ports, in which diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 34accdbe8..923da2985 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -302,6 +302,7 @@ impl Config { /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. + /// There must be either no hosts, or the same number of hosts as hostaddrs. pub fn host(&mut self, host: &str) -> &mut Config { #[cfg(unix)] { From 6c49a452feb273430d0091de83961ad65ffb9102 Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sat, 27 Aug 2022 11:55:47 -0700 Subject: [PATCH 611/819] fmt --- postgres/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index a754ff91f..921566b66 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -83,7 +83,7 @@ use tokio_postgres::{Error, Socket}; /// ```not_rust /// host=host1,host2,host3 port=1234,,5678 hostaddr=127.0.0.1,127.0.0.2,127.0.0.3 user=postgres target_session_attrs=read-write /// ``` -/// +/// /// ```not_rust /// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write /// ``` @@ -236,7 +236,7 @@ impl Config { /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. pub fn get_hostaddrs(&self) -> &[IpAddr] { self.config.get_hostaddrs() - } + } /// Adds a Unix socket host to the configuration. /// From 42fef24973dff5450b294df21e94e665fe4d996d Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sun, 28 Aug 2022 12:09:53 -0700 Subject: [PATCH 612/819] explicitly handle host being None --- tokio-postgres/src/connect.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index ee1dc1c76..63574516c 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -51,14 +51,17 @@ where .copied() .unwrap_or(5432); - // The value of host is always used as the hostname for TLS validation. - // postgres doesn't support TLS over unix sockets, so the choice for Host::Unix variant here doesn't matter + // The value of host is used as the hostname for TLS validation, + // if it's not present, use the value of hostaddr. 
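A brief usage sketch of the hostaddr support documented above — not part of the patches, with a placeholder host name and address, and assuming a build of the `postgres` crate that already contains the hostaddr()/get_hostaddrs() additions:

```rust
use postgres::Config;
use std::net::IpAddr;

fn main() {
    // Roughly what "host=db.example.com hostaddr=127.0.0.1 port=5432 user=postgres"
    // parses into: the IP address is used for the TCP connection, while the host
    // name stays available as the hostname for TLS certificate validation.
    let addr: IpAddr = "127.0.0.1".parse().unwrap();

    let mut config = Config::new();
    config
        .host("db.example.com")
        .hostaddr(addr)
        .port(5432)
        .user("postgres");

    assert_eq!(config.get_hostaddrs(), &[addr]);
}
```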
let hostname = match host { - Some(Host::Tcp(host)) => host.as_str(), - _ => "", + Some(Host::Tcp(host)) => host.clone(), + // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter Some() + #[cfg(unix)] + Some(Host::Unix(_)) => "".to_string(), + None => hostaddr.map_or("".to_string(), |ipaddr| ipaddr.to_string()), }; let tls = tls - .make_tls_connect(hostname) + .make_tls_connect(&hostname) .map_err(|e| Error::tls(e.into()))?; // Try to use the value of hostaddr to establish the TCP connection, From 9b34d74df143527602a18b1564b554647dbf5eaf Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sun, 28 Aug 2022 12:18:36 -0700 Subject: [PATCH 613/819] add negative test --- tokio-postgres/src/config.rs | 6 ++++++ tokio-postgres/src/connect.rs | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 923da2985..e5bed8ddf 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1051,4 +1051,10 @@ mod tests { assert_eq!(1, 1); } + + #[test] + fn test_invalid_hostaddr_parsing() { + let s = "user=pass_user dbname=postgres host=host1 hostaddr=127.0.0 port=26257"; + s.parse::().err().unwrap(); + } } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 63574516c..888f9cf8a 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -55,7 +55,7 @@ where // if it's not present, use the value of hostaddr. let hostname = match host { Some(Host::Tcp(host)) => host.clone(), - // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter Some() + // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] Some(Host::Unix(_)) => "".to_string(), None => hostaddr.map_or("".to_string(), |ipaddr| ipaddr.to_string()), From 8ac10ff1de52281592d5bdd75e109d995ca33a2c Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Tue, 30 Aug 2022 22:10:19 -0700 Subject: [PATCH 614/819] move test to runtime --- tokio-postgres/tests/test/main.rs | 52 ---------------------------- tokio-postgres/tests/test/runtime.rs | 52 ++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 387c90d7c..0ab4a7bab 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -147,58 +147,6 @@ async fn scram_password_ok() { connect("user=scram_user password=password dbname=postgres").await; } -#[tokio::test] -async fn host_only_ok() { - let _ = tokio_postgres::connect( - "host=localhost port=5433 user=pass_user dbname=postgres password=password", - NoTls, - ) - .await - .unwrap(); -} - -#[tokio::test] -async fn hostaddr_only_ok() { - let _ = tokio_postgres::connect( - "hostaddr=127.0.0.1 port=5433 user=pass_user dbname=postgres password=password", - NoTls, - ) - .await - .unwrap(); -} - -#[tokio::test] -async fn hostaddr_and_host_ok() { - let _ = tokio_postgres::connect( - "hostaddr=127.0.0.1 host=localhost port=5433 user=pass_user dbname=postgres password=password", - NoTls, - ) - .await - .unwrap(); -} - -#[tokio::test] -async fn hostaddr_host_mismatch() { - let _ = tokio_postgres::connect( - "hostaddr=127.0.0.1,127.0.0.2 host=localhost port=5433 user=pass_user dbname=postgres password=password", - NoTls, - ) - .await - .err() - .unwrap(); -} - -#[tokio::test] -async fn hostaddr_host_both_missing() { - let _ = tokio_postgres::connect( - "port=5433 user=pass_user dbname=postgres 
password=password", - NoTls, - ) - .await - .err() - .unwrap(); -} - #[tokio::test] async fn pipelined_prepare() { let client = connect("user=postgres").await; diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 67b4ead8a..86c1f0701 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -66,6 +66,58 @@ async fn target_session_attrs_err() { .unwrap(); } +#[tokio::test] +async fn host_only_ok() { + let _ = tokio_postgres::connect( + "host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_only_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_and_host_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_mismatch() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1,127.0.0.2 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_both_missing() { + let _ = tokio_postgres::connect( + "port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + #[tokio::test] async fn cancel_query() { let client = connect("host=localhost port=5433 user=postgres").await; From 3827b2e44279bbb8b869e07cf91c383d2445893d Mon Sep 17 00:00:00 2001 From: Matt Duch Date: Thu, 25 Aug 2022 19:29:09 -0500 Subject: [PATCH 615/819] derive bounds on generics --- postgres-derive-test/src/composites.rs | 10 ++++----- postgres-derive/src/composites.rs | 26 ++++++++++++++++++++++- postgres-derive/src/fromsql.rs | 29 +++++++++++++++++++++++--- postgres-derive/src/tosql.rs | 20 +++++++++++++++--- 4 files changed, 73 insertions(+), 12 deletions(-) diff --git a/postgres-derive-test/src/composites.rs b/postgres-derive-test/src/composites.rs index 667633519..a1b76345f 100644 --- a/postgres-derive-test/src/composites.rs +++ b/postgres-derive-test/src/composites.rs @@ -1,6 +1,6 @@ use crate::{test_type, test_type_asymmetric}; use postgres::{Client, NoTls}; -use postgres_types::{FromSql, FromSqlOwned, ToSql, WrongType}; +use postgres_types::{FromSql, ToSql, WrongType}; use std::error::Error; #[test] @@ -242,9 +242,9 @@ fn raw_ident_field() { #[test] fn generics() { #[derive(FromSql, Debug, PartialEq)] - struct InventoryItem + struct InventoryItem where - U: FromSqlOwned, + U: Clone, { name: String, supplier_id: T, @@ -254,9 +254,9 @@ fn generics() { // doesn't make sense to implement derived FromSql on a type with borrows #[derive(ToSql, Debug, PartialEq)] #[postgres(name = "InventoryItem")] - struct InventoryItemRef<'a, T: 'a + ToSql, U> + struct InventoryItemRef<'a, T: 'a + Clone, U> where - U: 'a + ToSql, + U: 'a + Clone, { name: &'a str, supplier_id: &'a T, diff --git a/postgres-derive/src/composites.rs b/postgres-derive/src/composites.rs index c1e495154..15bfabc13 100644 --- a/postgres-derive/src/composites.rs +++ b/postgres-derive/src/composites.rs @@ -1,4 +1,8 @@ -use syn::{Error, Ident, Type}; +use proc_macro2::Span; +use syn::{ + punctuated::Punctuated, Error, GenericParam, Generics, Ident, Path, PathSegment, Type, + TypeParamBound, +}; use crate::overrides::Overrides; @@ -26,3 
+30,23 @@ impl Field { }) } } + +pub(crate) fn append_generic_bound(mut generics: Generics, bound: &TypeParamBound) -> Generics { + for param in &mut generics.params { + if let GenericParam::Type(param) = param { + param.bounds.push(bound.to_owned()) + } + } + generics +} + +pub(crate) fn new_derive_path(last: PathSegment) -> Path { + let mut path = Path { + leading_colon: None, + segments: Punctuated::new(), + }; + path.segments + .push(Ident::new("postgres_types", Span::call_site()).into()); + path.segments.push(last); + path +} diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index 415343653..f458c6e3d 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -2,12 +2,15 @@ use proc_macro2::{Span, TokenStream}; use quote::{format_ident, quote}; use std::iter; use syn::{ - Data, DataStruct, DeriveInput, Error, Fields, GenericParam, Generics, Ident, Lifetime, - LifetimeDef, + punctuated::Punctuated, token, AngleBracketedGenericArguments, Data, DataStruct, DeriveInput, + Error, Fields, GenericArgument, GenericParam, Generics, Ident, Lifetime, LifetimeDef, + PathArguments, PathSegment, }; +use syn::{TraitBound, TraitBoundModifier, TypeParamBound}; use crate::accepts; use crate::composites::Field; +use crate::composites::{append_generic_bound, new_derive_path}; use crate::enums::Variant; use crate::overrides::Overrides; @@ -208,9 +211,10 @@ fn composite_body(ident: &Ident, fields: &[Field]) -> TokenStream { } fn build_generics(source: &Generics) -> (Generics, Lifetime) { - let mut out = source.to_owned(); // don't worry about lifetime name collisions, it doesn't make sense to derive FromSql on a struct with a lifetime let lifetime = Lifetime::new("'a", Span::call_site()); + + let mut out = append_generic_bound(source.to_owned(), &new_fromsql_bound(&lifetime)); out.params.insert( 0, GenericParam::Lifetime(LifetimeDef::new(lifetime.to_owned())), @@ -218,3 +222,22 @@ fn build_generics(source: &Generics) -> (Generics, Lifetime) { (out, lifetime) } + +fn new_fromsql_bound(lifetime: &Lifetime) -> TypeParamBound { + let mut path_segment: PathSegment = Ident::new("FromSql", Span::call_site()).into(); + let mut seg_args = Punctuated::new(); + seg_args.push(GenericArgument::Lifetime(lifetime.to_owned())); + path_segment.arguments = PathArguments::AngleBracketed(AngleBracketedGenericArguments { + colon2_token: None, + lt_token: token::Lt::default(), + args: seg_args, + gt_token: token::Gt::default(), + }); + + TypeParamBound::Trait(TraitBound { + lifetimes: None, + modifier: TraitBoundModifier::None, + paren_token: None, + path: new_derive_path(path_segment), + }) +} diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index 299074f8d..e51acc7fd 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -1,10 +1,14 @@ -use proc_macro2::TokenStream; +use proc_macro2::{Span, TokenStream}; use quote::quote; use std::iter; -use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; +use syn::{ + Data, DataStruct, DeriveInput, Error, Fields, Ident, TraitBound, TraitBoundModifier, + TypeParamBound, +}; use crate::accepts; use crate::composites::Field; +use crate::composites::{append_generic_bound, new_derive_path}; use crate::enums::Variant; use crate::overrides::Overrides; @@ -82,7 +86,8 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { }; let ident = &input.ident; - let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let generics = 
append_generic_bound(input.generics.to_owned(), &new_tosql_bound()); + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let out = quote! { impl#impl_generics postgres_types::ToSql for #ident#ty_generics #where_clause { fn to_sql(&self, @@ -182,3 +187,12 @@ fn composite_body(fields: &[Field]) -> TokenStream { std::result::Result::Ok(postgres_types::IsNull::No) } } + +fn new_tosql_bound() -> TypeParamBound { + TypeParamBound::Trait(TraitBound { + lifetimes: None, + modifier: TraitBoundModifier::None, + paren_token: None, + path: new_derive_path(Ident::new("ToSql", Span::call_site()).into()), + }) +} From dfe3bc04937627d176c643267dc4a25d93831ccb Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Wed, 7 Sep 2022 19:01:27 -0400 Subject: [PATCH 616/819] Release v0.4.3 --- postgres-derive/CHANGELOG.md | 6 ++++++ postgres-derive/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md index 1cc55bfe8..dde466a97 100644 --- a/postgres-derive/CHANGELOG.md +++ b/postgres-derive/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.4.3 - 2022-09-07 + +### Added + +* Added support for parameterized structs. + ## v0.4.2 - 2022-04-30 ### Added diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 324400162..22b50b707 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-derive" -version = "0.4.2" +version = "0.4.3" authors = ["Steven Fackler "] license = "MIT/Apache-2.0" edition = "2018" From e5f7f8e291928152b1dad3319a494ddbb8d507a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Sep 2022 13:19:27 +0000 Subject: [PATCH 617/819] Update criterion requirement from 0.3 to 0.4 Updates the requirements on [criterion](https://github.com/bheisler/criterion.rs) to permit the latest version. - [Release notes](https://github.com/bheisler/criterion.rs/releases) - [Changelog](https://github.com/bheisler/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/bheisler/criterion.rs/compare/0.3.0...0.4.0) --- updated-dependencies: - dependency-name: criterion dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- postgres/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 2ca65941b..bd7c297f3 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -45,5 +45,5 @@ tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" [dev-dependencies] -criterion = "0.3" +criterion = "0.4" tokio = { version = "1.0", features = ["rt-multi-thread"] } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 1a9638eff..760105104 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -61,7 +61,7 @@ tokio-util = { version = "0.7", features = ["codec"] } [dev-dependencies] futures-executor = "0.3" -criterion = "0.3" +criterion = "0.4" env_logger = "0.9" tokio = { version = "1.0", features = [ "macros", From 7faeea5acc3c24493b97306f0238712bf7e0b4e9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 21 Nov 2022 15:19:11 -0800 Subject: [PATCH 618/819] Fix clippy/deprecation warnings --- postgres-protocol/src/authentication/mod.rs | 2 +- postgres-types/src/chrono_04.rs | 8 ++++---- postgres-types/src/geo_types_07.rs | 4 ++-- postgres-types/src/lib.rs | 10 +++++----- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/postgres-protocol/src/authentication/mod.rs b/postgres-protocol/src/authentication/mod.rs index 9cfd6034c..71afa4b9b 100644 --- a/postgres-protocol/src/authentication/mod.rs +++ b/postgres-protocol/src/authentication/mod.rs @@ -15,7 +15,7 @@ pub fn md5_hash(username: &[u8], password: &[u8], salt: [u8; 4]) -> String { md5.update(username); let output = md5.finalize_reset(); md5.update(format!("{:x}", output)); - md5.update(&salt); + md5.update(salt); format!("md5{:x}", md5.finalize()) } diff --git a/postgres-types/src/chrono_04.rs b/postgres-types/src/chrono_04.rs index fcd25e6d1..180ee3766 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -6,7 +6,7 @@ use std::error::Error; use crate::{FromSql, IsNull, ToSql, Type}; fn base() -> NaiveDateTime { - NaiveDate::from_ymd(2000, 1, 1).and_hms(0, 0, 0) + NaiveDate::from_ymd_opt(2000, 1, 1).unwrap().and_hms_opt(0, 0, 0).unwrap() } impl<'a> FromSql<'a> for NaiveDateTime { @@ -84,7 +84,7 @@ impl<'a> FromSql<'a> for DateTime { raw: &[u8], ) -> Result, Box> { let utc = DateTime::::from_sql(type_, raw)?; - Ok(utc.with_timezone(&FixedOffset::east(0))) + Ok(utc.with_timezone(&FixedOffset::east_opt(0).unwrap())) } accepts!(TIMESTAMPTZ); @@ -133,7 +133,7 @@ impl ToSql for NaiveDate { impl<'a> FromSql<'a> for NaiveTime { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let usec = types::time_from_sql(raw)?; - Ok(NaiveTime::from_hms(0, 0, 0) + Duration::microseconds(usec)) + Ok(NaiveTime::from_hms_opt(0, 0, 0).unwrap() + Duration::microseconds(usec)) } accepts!(TIME); @@ -141,7 +141,7 @@ impl<'a> FromSql<'a> for NaiveTime { impl ToSql for NaiveTime { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let delta = self.signed_duration_since(NaiveTime::from_hms(0, 0, 0)); + let delta = self.signed_duration_since(NaiveTime::from_hms_opt(0, 0, 0).unwrap()); let time = match delta.num_microseconds() { Some(time) => time, None => return Err("value too large to transmit".into()), diff --git a/postgres-types/src/geo_types_07.rs b/postgres-types/src/geo_types_07.rs index 7dfb51056..bf7fa5601 100644 --- a/postgres-types/src/geo_types_07.rs +++ b/postgres-types/src/geo_types_07.rs @@ -1,6 +1,6 @@ use bytes::BytesMut; use 
fallible_iterator::FallibleIterator; -use geo_types_0_7::{Coordinate, LineString, Point, Rect}; +use geo_types_0_7::{Coord, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; @@ -52,7 +52,7 @@ impl<'a> FromSql<'a> for LineString { let path = types::path_from_sql(raw)?; let points = path .points() - .map(|p| Ok(Coordinate { x: p.x(), y: p.y() })) + .map(|p| Ok(Coord { x: p.x(), y: p.y() })) .collect()?; Ok(LineString(points)) } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index f5d841cd1..ca4233f8a 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -938,7 +938,7 @@ impl<'a, T: ToSql> ToSql for &'a [T] { impl<'a> ToSql for &'a [u8] { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - types::bytea_to_sql(*self, w); + types::bytea_to_sql(self, w); Ok(IsNull::No) } @@ -1011,10 +1011,10 @@ impl ToSql for Vec { impl<'a> ToSql for &'a str { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { match *ty { - ref ty if ty.name() == "ltree" => types::ltree_to_sql(*self, w), - ref ty if ty.name() == "lquery" => types::lquery_to_sql(*self, w), - ref ty if ty.name() == "ltxtquery" => types::ltxtquery_to_sql(*self, w), - _ => types::text_to_sql(*self, w), + ref ty if ty.name() == "ltree" => types::ltree_to_sql(self, w), + ref ty if ty.name() == "lquery" => types::lquery_to_sql(self, w), + ref ty if ty.name() == "ltxtquery" => types::ltxtquery_to_sql(self, w), + _ => types::text_to_sql(self, w), } Ok(IsNull::No) } From 6c18ac09b95a65af45be899f9151928d91ecb48a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 21 Nov 2022 15:20:25 -0800 Subject: [PATCH 619/819] rustfmt --- postgres-types/src/chrono_04.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/postgres-types/src/chrono_04.rs b/postgres-types/src/chrono_04.rs index 180ee3766..0ec92437d 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -6,7 +6,10 @@ use std::error::Error; use crate::{FromSql, IsNull, ToSql, Type}; fn base() -> NaiveDateTime { - NaiveDate::from_ymd_opt(2000, 1, 1).unwrap().and_hms_opt(0, 0, 0).unwrap() + NaiveDate::from_ymd_opt(2000, 1, 1) + .unwrap() + .and_hms_opt(0, 0, 0) + .unwrap() } impl<'a> FromSql<'a> for NaiveDateTime { From 6557bca87137207adc7aa4ca3c3eaee4f3beffb6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 21 Nov 2022 15:25:39 -0800 Subject: [PATCH 620/819] more clippy --- postgres-derive-test/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres-derive-test/src/lib.rs b/postgres-derive-test/src/lib.rs index 8bfd147fb..d1478ac4c 100644 --- a/postgres-derive-test/src/lib.rs +++ b/postgres-derive-test/src/lib.rs @@ -16,12 +16,12 @@ where { for &(ref val, ref repr) in checks.iter() { let stmt = conn - .prepare(&*format!("SELECT {}::{}", *repr, sql_type)) + .prepare(&format!("SELECT {}::{}", *repr, sql_type)) .unwrap(); let result = conn.query_one(&stmt, &[]).unwrap().get(0); assert_eq!(val, &result); - let stmt = conn.prepare(&*format!("SELECT $1::{}", sql_type)).unwrap(); + let stmt = conn.prepare(&format!("SELECT $1::{}", sql_type)).unwrap(); let result = conn.query_one(&stmt, &[val]).unwrap().get(0); assert_eq!(val, &result); } @@ -40,12 +40,12 @@ pub fn test_type_asymmetric( { for &(ref val, ref repr) in checks.iter() { let stmt = conn - .prepare(&*format!("SELECT {}::{}", *repr, sql_type)) + .prepare(&format!("SELECT {}::{}", *repr, sql_type)) .unwrap(); let result: F = conn.query_one(&stmt, &[]).unwrap().get(0); 
assert!(cmp(val, &result)); - let stmt = conn.prepare(&*format!("SELECT $1::{}", sql_type)).unwrap(); + let stmt = conn.prepare(&format!("SELECT $1::{}", sql_type)).unwrap(); let result: F = conn.query_one(&stmt, &[val]).unwrap().get(0); assert!(cmp(val, &result)); } From f48c3b577f3e4515f2d0e88dcf026295194e4295 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 21 Nov 2022 15:26:37 -0800 Subject: [PATCH 621/819] more deprecations --- tokio-postgres/tests/test/types/geo_types_07.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tokio-postgres/tests/test/types/geo_types_07.rs b/tokio-postgres/tests/test/types/geo_types_07.rs index 85ff2553a..43a13f451 100644 --- a/tokio-postgres/tests/test/types/geo_types_07.rs +++ b/tokio-postgres/tests/test/types/geo_types_07.rs @@ -1,5 +1,5 @@ #[cfg(feature = "with-geo-types-0_7")] -use geo_types_07::{Coordinate, LineString, Point, Rect}; +use geo_types_07::{Coord, LineString, Point, Rect}; use crate::types::test_type; @@ -23,8 +23,8 @@ async fn test_box_params() { &[ ( Some(Rect::new( - Coordinate { x: -3.2, y: 1.618 }, - Coordinate { + Coord { x: -3.2, y: 1.618 }, + Coord { x: 160.0, y: 69701.5615, }, @@ -40,9 +40,9 @@ async fn test_box_params() { #[tokio::test] async fn test_path_params() { let points = vec![ - Coordinate { x: 0., y: 0. }, - Coordinate { x: -3.2, y: 1.618 }, - Coordinate { + Coord { x: 0., y: 0. }, + Coord { x: -3.2, y: 1.618 }, + Coord { x: 160.0, y: 69701.5615, }, From b29164d53a7bcc16fed7aceb7cb6cb9a3faf214a Mon Sep 17 00:00:00 2001 From: Joseph Koshakow Date: Mon, 21 Nov 2022 12:15:38 -0500 Subject: [PATCH 622/819] Add idle session timeout error This commit adds the idle session timeout error (57P05). See https://www.postgresql.org/docs/current/errcodes-appendix.html --- tokio-postgres/src/error/sqlstate.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 71648a948..4eb1ae2d0 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -232,6 +232,7 @@ impl SqlState { Inner::E57P02 => "57P02", Inner::E57P03 => "57P03", Inner::E57P04 => "57P04", + Inner::E57P05 => "57P05", Inner::E58000 => "58000", Inner::E58030 => "58030", Inner::E58P01 => "58P01", @@ -946,6 +947,9 @@ impl SqlState { /// 57P04 pub const DATABASE_DROPPED: SqlState = SqlState(Inner::E57P04); + /// 57P05 + pub const IDLE_SESSION_TIMEOUT: SqlState = SqlState(Inner::E57P05); + /// 58000 pub const SYSTEM_ERROR: SqlState = SqlState(Inner::E58000); @@ -1292,6 +1296,7 @@ enum Inner { E57P02, E57P03, E57P04, + E57P05, E58000, E58030, E58P01, @@ -1498,6 +1503,7 @@ static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ("55006", SqlState::OBJECT_IN_USE), ("42P01", SqlState::UNDEFINED_TABLE), ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), + ("57P05", SqlState::IDLE_SESSION_TIMEOUT), ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), ("P0004", SqlState::ASSERT_FAILURE), From 664345f9ec06920c3129be2f1b7e0dd6c031c8ef Mon Sep 17 00:00:00 2001 From: Joseph Koshakow Date: Mon, 21 Nov 2022 14:50:52 -0500 Subject: [PATCH 623/819] Update PostgreSQL files to most recent version --- codegen/src/errcodes.txt | 4 +- codegen/src/pg_range.dat | 23 +++--- codegen/src/pg_type.dat | 160 +++++++++++++++++++++++++++------------ 3 files changed, 127 insertions(+), 60 deletions(-) diff --git a/codegen/src/errcodes.txt b/codegen/src/errcodes.txt index c79312ed0..62418a051 
100644 --- a/codegen/src/errcodes.txt +++ b/codegen/src/errcodes.txt @@ -2,7 +2,7 @@ # errcodes.txt # PostgreSQL error codes # -# Copyright (c) 2003-2020, PostgreSQL Global Development Group +# Copyright (c) 2003-2022, PostgreSQL Global Development Group # # This list serves as the basis for generating source files containing error # codes. It is kept in a common format to make sure all these source files have @@ -222,6 +222,7 @@ Section: Class 22 - Data Exception 2203D E ERRCODE_TOO_MANY_JSON_ARRAY_ELEMENTS too_many_json_array_elements 2203E E ERRCODE_TOO_MANY_JSON_OBJECT_MEMBERS too_many_json_object_members 2203F E ERRCODE_SQL_JSON_SCALAR_REQUIRED sql_json_scalar_required +2203G E ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE sql_json_item_cannot_be_cast_to_target_type Section: Class 23 - Integrity Constraint Violation @@ -428,6 +429,7 @@ Section: Class 57 - Operator Intervention 57P02 E ERRCODE_CRASH_SHUTDOWN crash_shutdown 57P03 E ERRCODE_CANNOT_CONNECT_NOW cannot_connect_now 57P04 E ERRCODE_DATABASE_DROPPED database_dropped +57P05 E ERRCODE_IDLE_SESSION_TIMEOUT idle_session_timeout Section: Class 58 - System Error (errors external to PostgreSQL itself) diff --git a/codegen/src/pg_range.dat b/codegen/src/pg_range.dat index 479754c24..74d6de0cf 100644 --- a/codegen/src/pg_range.dat +++ b/codegen/src/pg_range.dat @@ -3,7 +3,7 @@ # pg_range.dat # Initial contents of the pg_range system catalog. # -# Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/include/catalog/pg_range.dat @@ -12,20 +12,23 @@ [ -{ rngtypid => 'int4range', rngsubtype => 'int4', rngsubopc => 'btree/int4_ops', +{ rngtypid => 'int4range', rngsubtype => 'int4', + rngmultitypid => 'int4multirange', rngsubopc => 'btree/int4_ops', rngcanonical => 'int4range_canonical', rngsubdiff => 'int4range_subdiff' }, { rngtypid => 'numrange', rngsubtype => 'numeric', - rngsubopc => 'btree/numeric_ops', rngcanonical => '-', - rngsubdiff => 'numrange_subdiff' }, + rngmultitypid => 'nummultirange', rngsubopc => 'btree/numeric_ops', + rngcanonical => '-', rngsubdiff => 'numrange_subdiff' }, { rngtypid => 'tsrange', rngsubtype => 'timestamp', - rngsubopc => 'btree/timestamp_ops', rngcanonical => '-', - rngsubdiff => 'tsrange_subdiff' }, + rngmultitypid => 'tsmultirange', rngsubopc => 'btree/timestamp_ops', + rngcanonical => '-', rngsubdiff => 'tsrange_subdiff' }, { rngtypid => 'tstzrange', rngsubtype => 'timestamptz', - rngsubopc => 'btree/timestamptz_ops', rngcanonical => '-', - rngsubdiff => 'tstzrange_subdiff' }, -{ rngtypid => 'daterange', rngsubtype => 'date', rngsubopc => 'btree/date_ops', + rngmultitypid => 'tstzmultirange', rngsubopc => 'btree/timestamptz_ops', + rngcanonical => '-', rngsubdiff => 'tstzrange_subdiff' }, +{ rngtypid => 'daterange', rngsubtype => 'date', + rngmultitypid => 'datemultirange', rngsubopc => 'btree/date_ops', rngcanonical => 'daterange_canonical', rngsubdiff => 'daterange_subdiff' }, -{ rngtypid => 'int8range', rngsubtype => 'int8', rngsubopc => 'btree/int8_ops', +{ rngtypid => 'int8range', rngsubtype => 'int8', + rngmultitypid => 'int8multirange', rngsubopc => 'btree/int8_ops', rngcanonical => 'int8range_canonical', rngsubdiff => 'int8range_subdiff' }, ] diff --git a/codegen/src/pg_type.dat b/codegen/src/pg_type.dat index e8be00083..df4587946 100644 --- a/codegen/src/pg_type.dat +++ b/codegen/src/pg_type.dat @@ -3,7 +3,7 @@ # 
pg_type.dat # Initial contents of the pg_type system catalog. # -# Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/include/catalog/pg_type.dat @@ -15,14 +15,10 @@ # For types used in the system catalogs, make sure the values here match # TypInfo[] in bootstrap.c. -# OID symbol macro names for pg_type OIDs are generated by genbki.pl -# according to the following rule, so you don't need to specify them -# here: +# OID symbol macro names for pg_type OIDs are not specified here because +# they are generated by genbki.pl according to the following rule: # foo_bar -> FOO_BAROID # _foo_bar -> FOO_BARARRAYOID -# -# The only oid_symbol entries in this file are for names that don't match -# this rule, and are grandfathered in. # To autogenerate an array type, add 'array_type_oid => 'nnnn' to the element # type, which will instruct genbki.pl to generate a BKI entry for it. @@ -46,15 +42,16 @@ typinput => 'byteain', typoutput => 'byteaout', typreceive => 'bytearecv', typsend => 'byteasend', typalign => 'i', typstorage => 'x' }, { oid => '18', array_type_oid => '1002', descr => 'single character', - typname => 'char', typlen => '1', typbyval => 't', typcategory => 'S', + typname => 'char', typlen => '1', typbyval => 't', typcategory => 'Z', typinput => 'charin', typoutput => 'charout', typreceive => 'charrecv', typsend => 'charsend', typalign => 'c' }, { oid => '19', array_type_oid => '1003', descr => '63-byte type for storing system identifiers', typname => 'name', typlen => 'NAMEDATALEN', typbyval => 'f', - typcategory => 'S', typelem => 'char', typinput => 'namein', - typoutput => 'nameout', typreceive => 'namerecv', typsend => 'namesend', - typalign => 'c', typcollation => 'C' }, + typcategory => 'S', typsubscript => 'raw_array_subscript_handler', + typelem => 'char', typinput => 'namein', typoutput => 'nameout', + typreceive => 'namerecv', typsend => 'namesend', typalign => 'c', + typcollation => 'C' }, { oid => '20', array_type_oid => '1016', descr => '~18 digit integer, 8-byte storage', typname => 'int8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', @@ -68,7 +65,8 @@ { oid => '22', array_type_oid => '1006', descr => 'array of int2, used in system tables', typname => 'int2vector', typlen => '-1', typbyval => 'f', typcategory => 'A', - typelem => 'int2', typinput => 'int2vectorin', typoutput => 'int2vectorout', + typsubscript => 'array_subscript_handler', typelem => 'int2', + typinput => 'int2vectorin', typoutput => 'int2vectorout', typreceive => 'int2vectorrecv', typsend => 'int2vectorsend', typalign => 'i' }, { oid => '23', array_type_oid => '1007', @@ -108,27 +106,28 @@ { oid => '30', array_type_oid => '1013', descr => 'array of oids, used in system tables', typname => 'oidvector', typlen => '-1', typbyval => 'f', typcategory => 'A', - typelem => 'oid', typinput => 'oidvectorin', typoutput => 'oidvectorout', + typsubscript => 'array_subscript_handler', typelem => 'oid', + typinput => 'oidvectorin', typoutput => 'oidvectorout', typreceive => 'oidvectorrecv', typsend => 'oidvectorsend', typalign => 'i' }, # hand-built rowtype entries for bootstrapped catalogs # NB: OIDs assigned here must match the BKI_ROWTYPE_OID declarations -{ oid => '71', +{ oid => '71', array_type_oid => '210', typname => 'pg_type', typlen => '-1', typbyval => 'f', typtype => 'c', typcategory => 'C', typrelid => 'pg_type', typinput => 'record_in', 
typoutput => 'record_out', typreceive => 'record_recv', typsend => 'record_send', typalign => 'd', typstorage => 'x' }, -{ oid => '75', +{ oid => '75', array_type_oid => '270', typname => 'pg_attribute', typlen => '-1', typbyval => 'f', typtype => 'c', typcategory => 'C', typrelid => 'pg_attribute', typinput => 'record_in', typoutput => 'record_out', typreceive => 'record_recv', typsend => 'record_send', typalign => 'd', typstorage => 'x' }, -{ oid => '81', +{ oid => '81', array_type_oid => '272', typname => 'pg_proc', typlen => '-1', typbyval => 'f', typtype => 'c', typcategory => 'C', typrelid => 'pg_proc', typinput => 'record_in', typoutput => 'record_out', typreceive => 'record_recv', typsend => 'record_send', typalign => 'd', typstorage => 'x' }, -{ oid => '83', +{ oid => '83', array_type_oid => '273', typname => 'pg_class', typlen => '-1', typbyval => 'f', typtype => 'c', typcategory => 'C', typrelid => 'pg_class', typinput => 'record_in', typoutput => 'record_out', typreceive => 'record_recv', @@ -144,35 +143,30 @@ typname => 'xml', typlen => '-1', typbyval => 'f', typcategory => 'U', typinput => 'xml_in', typoutput => 'xml_out', typreceive => 'xml_recv', typsend => 'xml_send', typalign => 'i', typstorage => 'x' }, -{ oid => '194', oid_symbol => 'PGNODETREEOID', - descr => 'string representing an internal node tree', +{ oid => '194', descr => 'string representing an internal node tree', typname => 'pg_node_tree', typlen => '-1', typbyval => 'f', - typcategory => 'S', typinput => 'pg_node_tree_in', + typcategory => 'Z', typinput => 'pg_node_tree_in', typoutput => 'pg_node_tree_out', typreceive => 'pg_node_tree_recv', typsend => 'pg_node_tree_send', typalign => 'i', typstorage => 'x', typcollation => 'default' }, -{ oid => '3361', oid_symbol => 'PGNDISTINCTOID', - descr => 'multivariate ndistinct coefficients', +{ oid => '3361', descr => 'multivariate ndistinct coefficients', typname => 'pg_ndistinct', typlen => '-1', typbyval => 'f', - typcategory => 'S', typinput => 'pg_ndistinct_in', + typcategory => 'Z', typinput => 'pg_ndistinct_in', typoutput => 'pg_ndistinct_out', typreceive => 'pg_ndistinct_recv', typsend => 'pg_ndistinct_send', typalign => 'i', typstorage => 'x', typcollation => 'default' }, -{ oid => '3402', oid_symbol => 'PGDEPENDENCIESOID', - descr => 'multivariate dependencies', +{ oid => '3402', descr => 'multivariate dependencies', typname => 'pg_dependencies', typlen => '-1', typbyval => 'f', - typcategory => 'S', typinput => 'pg_dependencies_in', + typcategory => 'Z', typinput => 'pg_dependencies_in', typoutput => 'pg_dependencies_out', typreceive => 'pg_dependencies_recv', typsend => 'pg_dependencies_send', typalign => 'i', typstorage => 'x', typcollation => 'default' }, -{ oid => '5017', oid_symbol => 'PGMCVLISTOID', - descr => 'multivariate MCV list', - typname => 'pg_mcv_list', typlen => '-1', typbyval => 'f', typcategory => 'S', +{ oid => '5017', descr => 'multivariate MCV list', + typname => 'pg_mcv_list', typlen => '-1', typbyval => 'f', typcategory => 'Z', typinput => 'pg_mcv_list_in', typoutput => 'pg_mcv_list_out', typreceive => 'pg_mcv_list_recv', typsend => 'pg_mcv_list_send', typalign => 'i', typstorage => 'x', typcollation => 'default' }, -{ oid => '32', oid_symbol => 'PGDDLCOMMANDOID', - descr => 'internal type for passing CollectedCommand', +{ oid => '32', descr => 'internal type for passing CollectedCommand', typname => 'pg_ddl_command', typlen => 'SIZEOF_POINTER', typbyval => 't', typtype => 'p', typcategory => 'P', typinput => 'pg_ddl_command_in', 
typoutput => 'pg_ddl_command_out', typreceive => 'pg_ddl_command_recv', @@ -187,13 +181,15 @@ { oid => '600', array_type_oid => '1017', descr => 'geometric point \'(x, y)\'', typname => 'point', typlen => '16', typbyval => 'f', typcategory => 'G', - typelem => 'float8', typinput => 'point_in', typoutput => 'point_out', - typreceive => 'point_recv', typsend => 'point_send', typalign => 'd' }, + typsubscript => 'raw_array_subscript_handler', typelem => 'float8', + typinput => 'point_in', typoutput => 'point_out', typreceive => 'point_recv', + typsend => 'point_send', typalign => 'd' }, { oid => '601', array_type_oid => '1018', descr => 'geometric line segment \'(pt1,pt2)\'', typname => 'lseg', typlen => '32', typbyval => 'f', typcategory => 'G', - typelem => 'point', typinput => 'lseg_in', typoutput => 'lseg_out', - typreceive => 'lseg_recv', typsend => 'lseg_send', typalign => 'd' }, + typsubscript => 'raw_array_subscript_handler', typelem => 'point', + typinput => 'lseg_in', typoutput => 'lseg_out', typreceive => 'lseg_recv', + typsend => 'lseg_send', typalign => 'd' }, { oid => '602', array_type_oid => '1019', descr => 'geometric path \'(pt1,...)\'', typname => 'path', typlen => '-1', typbyval => 'f', typcategory => 'G', @@ -202,9 +198,9 @@ { oid => '603', array_type_oid => '1020', descr => 'geometric box \'(lower left,upper right)\'', typname => 'box', typlen => '32', typbyval => 'f', typcategory => 'G', - typdelim => ';', typelem => 'point', typinput => 'box_in', - typoutput => 'box_out', typreceive => 'box_recv', typsend => 'box_send', - typalign => 'd' }, + typdelim => ';', typsubscript => 'raw_array_subscript_handler', + typelem => 'point', typinput => 'box_in', typoutput => 'box_out', + typreceive => 'box_recv', typsend => 'box_send', typalign => 'd' }, { oid => '604', array_type_oid => '1027', descr => 'geometric polygon \'(pt1,...)\'', typname => 'polygon', typlen => '-1', typbyval => 'f', typcategory => 'G', @@ -212,8 +208,9 @@ typsend => 'poly_send', typalign => 'd', typstorage => 'x' }, { oid => '628', array_type_oid => '629', descr => 'geometric line', typname => 'line', typlen => '24', typbyval => 'f', typcategory => 'G', - typelem => 'float8', typinput => 'line_in', typoutput => 'line_out', - typreceive => 'line_recv', typsend => 'line_send', typalign => 'd' }, + typsubscript => 'raw_array_subscript_handler', typelem => 'float8', + typinput => 'line_in', typoutput => 'line_out', typreceive => 'line_recv', + typsend => 'line_send', typalign => 'd' }, # OIDS 700 - 799 @@ -237,7 +234,7 @@ typname => 'circle', typlen => '24', typbyval => 'f', typcategory => 'G', typinput => 'circle_in', typoutput => 'circle_out', typreceive => 'circle_recv', typsend => 'circle_send', typalign => 'd' }, -{ oid => '790', oid_symbol => 'CASHOID', array_type_oid => '791', +{ oid => '790', array_type_oid => '791', descr => 'monetary amounts, $d,ddd.cc', typname => 'money', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', typcategory => 'N', typinput => 'cash_in', typoutput => 'cash_out', @@ -409,8 +406,7 @@ typsend => 'uuid_send', typalign => 'c' }, # pg_lsn -{ oid => '3220', oid_symbol => 'LSNOID', array_type_oid => '3221', - descr => 'PostgreSQL LSN datatype', +{ oid => '3220', array_type_oid => '3221', descr => 'PostgreSQL LSN datatype', typname => 'pg_lsn', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', typcategory => 'U', typinput => 'pg_lsn_in', typoutput => 'pg_lsn_out', typreceive => 'pg_lsn_recv', typsend => 'pg_lsn_send', typalign => 'd' }, @@ -447,8 +443,9 @@ # jsonb { oid => '3802', 
array_type_oid => '3807', descr => 'Binary JSON', typname => 'jsonb', typlen => '-1', typbyval => 'f', typcategory => 'U', - typinput => 'jsonb_in', typoutput => 'jsonb_out', typreceive => 'jsonb_recv', - typsend => 'jsonb_send', typalign => 'i', typstorage => 'x' }, + typsubscript => 'jsonb_subscript_handler', typinput => 'jsonb_in', + typoutput => 'jsonb_out', typreceive => 'jsonb_recv', typsend => 'jsonb_send', + typalign => 'i', typstorage => 'x' }, { oid => '4072', array_type_oid => '4073', descr => 'JSON path', typname => 'jsonpath', typlen => '-1', typbyval => 'f', typcategory => 'U', typinput => 'jsonpath_in', typoutput => 'jsonpath_out', @@ -500,6 +497,46 @@ typreceive => 'range_recv', typsend => 'range_send', typanalyze => 'range_typanalyze', typalign => 'd', typstorage => 'x' }, +# multirange types +{ oid => '4451', array_type_oid => '6150', descr => 'multirange of integers', + typname => 'int4multirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'i', typstorage => 'x' }, +{ oid => '4532', array_type_oid => '6151', descr => 'multirange of numerics', + typname => 'nummultirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'i', typstorage => 'x' }, +{ oid => '4533', array_type_oid => '6152', + descr => 'multirange of timestamps without time zone', + typname => 'tsmultirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'd', typstorage => 'x' }, +{ oid => '4534', array_type_oid => '6153', + descr => 'multirange of timestamps with time zone', + typname => 'tstzmultirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'd', typstorage => 'x' }, +{ oid => '4535', array_type_oid => '6155', descr => 'multirange of dates', + typname => 'datemultirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'i', typstorage => 'x' }, +{ oid => '4536', array_type_oid => '6157', descr => 'multirange of bigints', + typname => 'int8multirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'd', typstorage => 'x' }, + # pseudo-types # types with typtype='p' represent various special cases in the type system. # These cannot be used to define table columns, but are valid as function @@ -517,8 +554,9 @@ # Arrays of records have typcategory P, so they can't be autogenerated. 
{ oid => '2287', typname => '_record', typlen => '-1', typbyval => 'f', typtype => 'p', - typcategory => 'P', typelem => 'record', typinput => 'array_in', - typoutput => 'array_out', typreceive => 'array_recv', typsend => 'array_send', + typcategory => 'P', typsubscript => 'array_subscript_handler', + typelem => 'record', typinput => 'array_in', typoutput => 'array_out', + typreceive => 'array_recv', typsend => 'array_send', typanalyze => 'array_typanalyze', typalign => 'd', typstorage => 'x' }, { oid => '2275', array_type_oid => '1263', descr => 'C-style string', typname => 'cstring', typlen => '-2', typbyval => 'f', typtype => 'p', @@ -542,7 +580,7 @@ typname => 'trigger', typlen => '4', typbyval => 't', typtype => 'p', typcategory => 'P', typinput => 'trigger_in', typoutput => 'trigger_out', typreceive => '-', typsend => '-', typalign => 'i' }, -{ oid => '3838', oid_symbol => 'EVTTRIGGEROID', +{ oid => '3838', descr => 'pseudo-type for the result of an event trigger function', typname => 'event_trigger', typlen => '4', typbyval => 't', typtype => 'p', typcategory => 'P', typinput => 'event_trigger_in', @@ -629,5 +667,29 @@ typtype => 'p', typcategory => 'P', typinput => 'anycompatiblerange_in', typoutput => 'anycompatiblerange_out', typreceive => '-', typsend => '-', typalign => 'd', typstorage => 'x' }, - +{ oid => '4537', + descr => 'pseudo-type representing a polymorphic base type that is a multirange', + typname => 'anymultirange', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typinput => 'anymultirange_in', + typoutput => 'anymultirange_out', typreceive => '-', typsend => '-', + typalign => 'd', typstorage => 'x' }, +{ oid => '4538', + descr => 'pseudo-type representing a multirange over a polymorphic common type', + typname => 'anycompatiblemultirange', typlen => '-1', typbyval => 'f', + typtype => 'p', typcategory => 'P', typinput => 'anycompatiblemultirange_in', + typoutput => 'anycompatiblemultirange_out', typreceive => '-', typsend => '-', + typalign => 'd', typstorage => 'x' }, +{ oid => '4600', descr => 'BRIN bloom summary', + typname => 'pg_brin_bloom_summary', typlen => '-1', typbyval => 'f', + typcategory => 'Z', typinput => 'brin_bloom_summary_in', + typoutput => 'brin_bloom_summary_out', + typreceive => 'brin_bloom_summary_recv', typsend => 'brin_bloom_summary_send', + typalign => 'i', typstorage => 'x', typcollation => 'default' }, +{ oid => '4601', descr => 'BRIN minmax-multi summary', + typname => 'pg_brin_minmax_multi_summary', typlen => '-1', typbyval => 'f', + typcategory => 'Z', typinput => 'brin_minmax_multi_summary_in', + typoutput => 'brin_minmax_multi_summary_out', + typreceive => 'brin_minmax_multi_summary_recv', + typsend => 'brin_minmax_multi_summary_send', typalign => 'i', + typstorage => 'x', typcollation => 'default' }, ] From f413e66b14f9b4574b69005a4671441ea10b2e6e Mon Sep 17 00:00:00 2001 From: Joseph Koshakow Date: Mon, 21 Nov 2022 15:24:21 -0500 Subject: [PATCH 624/819] Add support for multiranges --- codegen/src/type_gen.rs | 15 +- postgres-types/src/type_gen.rs | 840 +++++++++++++++++++++------ tokio-postgres/src/error/sqlstate.rs | 572 +++++++++--------- 3 files changed, 971 insertions(+), 456 deletions(-) diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 7e92e062a..249c5530a 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -185,6 +185,15 @@ fn parse_types() -> BTreeMap { ) }) .collect::>(); + let multi_range_elements = raw_ranges + .iter() + .map(|m| { + ( + 
oids_by_name[&*m["rngmultitypid"]], + oids_by_name[&*m["rngsubtype"]], + ) + }) + .collect::>(); let range_vector_re = Regex::new("(range|vector)$").unwrap(); let array_re = Regex::new("^_(.*)").unwrap(); @@ -209,7 +218,11 @@ fn parse_types() -> BTreeMap { } let element = match &*kind { - "R" => range_elements[&oid], + "R" => match &*raw_type["typtype"] { + "r" => range_elements[&oid], + "m" => multi_range_elements[&oid], + typtype => panic!("invalid range typtype {}", typtype), + } "A" => oids_by_name[&raw_type["typelem"]], _ => 0, }; diff --git a/postgres-types/src/type_gen.rs b/postgres-types/src/type_gen.rs index 85978096a..901fb46e0 100644 --- a/postgres-types/src/type_gen.rs +++ b/postgres-types/src/type_gen.rs @@ -1,7 +1,7 @@ // Autogenerated file - DO NOT EDIT use std::sync::Arc; -use crate::{Kind, Oid, Type}; +use crate::{Type, Oid, Kind}; #[derive(PartialEq, Eq, Debug, Hash)] pub struct Other { @@ -174,6 +174,16 @@ pub enum Inner { RegroleArray, Regcollation, RegcollationArray, + Int4multiRange, + NummultiRange, + TsmultiRange, + TstzmultiRange, + DatemultiRange, + Int8multiRange, + AnymultiRange, + AnycompatiblemultiRange, + PgBrinBloomSummary, + PgBrinMinmaxMultiSummary, PgMcvList, PgSnapshot, PgSnapshotArray, @@ -182,6 +192,12 @@ pub enum Inner { Anycompatiblearray, Anycompatiblenonarray, AnycompatibleRange, + Int4multiRangeArray, + NummultiRangeArray, + TsmultiRangeArray, + TstzmultiRangeArray, + DatemultiRangeArray, + Int8multiRangeArray, Other(Arc), } @@ -349,6 +365,16 @@ impl Inner { 4097 => Some(Inner::RegroleArray), 4191 => Some(Inner::Regcollation), 4192 => Some(Inner::RegcollationArray), + 4451 => Some(Inner::Int4multiRange), + 4532 => Some(Inner::NummultiRange), + 4533 => Some(Inner::TsmultiRange), + 4534 => Some(Inner::TstzmultiRange), + 4535 => Some(Inner::DatemultiRange), + 4536 => Some(Inner::Int8multiRange), + 4537 => Some(Inner::AnymultiRange), + 4538 => Some(Inner::AnycompatiblemultiRange), + 4600 => Some(Inner::PgBrinBloomSummary), + 4601 => Some(Inner::PgBrinMinmaxMultiSummary), 5017 => Some(Inner::PgMcvList), 5038 => Some(Inner::PgSnapshot), 5039 => Some(Inner::PgSnapshotArray), @@ -357,6 +383,12 @@ impl Inner { 5078 => Some(Inner::Anycompatiblearray), 5079 => Some(Inner::Anycompatiblenonarray), 5080 => Some(Inner::AnycompatibleRange), + 6150 => Some(Inner::Int4multiRangeArray), + 6151 => Some(Inner::NummultiRangeArray), + 6152 => Some(Inner::TsmultiRangeArray), + 6153 => Some(Inner::TstzmultiRangeArray), + 6155 => Some(Inner::DatemultiRangeArray), + 6157 => Some(Inner::Int8multiRangeArray), _ => None, } } @@ -524,6 +556,16 @@ impl Inner { Inner::RegroleArray => 4097, Inner::Regcollation => 4191, Inner::RegcollationArray => 4192, + Inner::Int4multiRange => 4451, + Inner::NummultiRange => 4532, + Inner::TsmultiRange => 4533, + Inner::TstzmultiRange => 4534, + Inner::DatemultiRange => 4535, + Inner::Int8multiRange => 4536, + Inner::AnymultiRange => 4537, + Inner::AnycompatiblemultiRange => 4538, + Inner::PgBrinBloomSummary => 4600, + Inner::PgBrinMinmaxMultiSummary => 4601, Inner::PgMcvList => 5017, Inner::PgSnapshot => 5038, Inner::PgSnapshotArray => 5039, @@ -532,181 +574,573 @@ impl Inner { Inner::Anycompatiblearray => 5078, Inner::Anycompatiblenonarray => 5079, Inner::AnycompatibleRange => 5080, + Inner::Int4multiRangeArray => 6150, + Inner::NummultiRangeArray => 6151, + Inner::TsmultiRangeArray => 6152, + Inner::TstzmultiRangeArray => 6153, + Inner::DatemultiRangeArray => 6155, + Inner::Int8multiRangeArray => 6157, Inner::Other(ref u) => u.oid, } } pub fn 
kind(&self) -> &Kind { match *self { - Inner::Bool => &Kind::Simple, - Inner::Bytea => &Kind::Simple, - Inner::Char => &Kind::Simple, - Inner::Name => &Kind::Simple, - Inner::Int8 => &Kind::Simple, - Inner::Int2 => &Kind::Simple, - Inner::Int2Vector => &Kind::Array(Type(Inner::Int2)), - Inner::Int4 => &Kind::Simple, - Inner::Regproc => &Kind::Simple, - Inner::Text => &Kind::Simple, - Inner::Oid => &Kind::Simple, - Inner::Tid => &Kind::Simple, - Inner::Xid => &Kind::Simple, - Inner::Cid => &Kind::Simple, - Inner::OidVector => &Kind::Array(Type(Inner::Oid)), - Inner::PgDdlCommand => &Kind::Pseudo, - Inner::Json => &Kind::Simple, - Inner::Xml => &Kind::Simple, - Inner::XmlArray => &Kind::Array(Type(Inner::Xml)), - Inner::PgNodeTree => &Kind::Simple, - Inner::JsonArray => &Kind::Array(Type(Inner::Json)), - Inner::TableAmHandler => &Kind::Pseudo, - Inner::Xid8Array => &Kind::Array(Type(Inner::Xid8)), - Inner::IndexAmHandler => &Kind::Pseudo, - Inner::Point => &Kind::Simple, - Inner::Lseg => &Kind::Simple, - Inner::Path => &Kind::Simple, - Inner::Box => &Kind::Simple, - Inner::Polygon => &Kind::Simple, - Inner::Line => &Kind::Simple, - Inner::LineArray => &Kind::Array(Type(Inner::Line)), - Inner::Cidr => &Kind::Simple, - Inner::CidrArray => &Kind::Array(Type(Inner::Cidr)), - Inner::Float4 => &Kind::Simple, - Inner::Float8 => &Kind::Simple, - Inner::Unknown => &Kind::Simple, - Inner::Circle => &Kind::Simple, - Inner::CircleArray => &Kind::Array(Type(Inner::Circle)), - Inner::Macaddr8 => &Kind::Simple, - Inner::Macaddr8Array => &Kind::Array(Type(Inner::Macaddr8)), - Inner::Money => &Kind::Simple, - Inner::MoneyArray => &Kind::Array(Type(Inner::Money)), - Inner::Macaddr => &Kind::Simple, - Inner::Inet => &Kind::Simple, - Inner::BoolArray => &Kind::Array(Type(Inner::Bool)), - Inner::ByteaArray => &Kind::Array(Type(Inner::Bytea)), - Inner::CharArray => &Kind::Array(Type(Inner::Char)), - Inner::NameArray => &Kind::Array(Type(Inner::Name)), - Inner::Int2Array => &Kind::Array(Type(Inner::Int2)), - Inner::Int2VectorArray => &Kind::Array(Type(Inner::Int2Vector)), - Inner::Int4Array => &Kind::Array(Type(Inner::Int4)), - Inner::RegprocArray => &Kind::Array(Type(Inner::Regproc)), - Inner::TextArray => &Kind::Array(Type(Inner::Text)), - Inner::TidArray => &Kind::Array(Type(Inner::Tid)), - Inner::XidArray => &Kind::Array(Type(Inner::Xid)), - Inner::CidArray => &Kind::Array(Type(Inner::Cid)), - Inner::OidVectorArray => &Kind::Array(Type(Inner::OidVector)), - Inner::BpcharArray => &Kind::Array(Type(Inner::Bpchar)), - Inner::VarcharArray => &Kind::Array(Type(Inner::Varchar)), - Inner::Int8Array => &Kind::Array(Type(Inner::Int8)), - Inner::PointArray => &Kind::Array(Type(Inner::Point)), - Inner::LsegArray => &Kind::Array(Type(Inner::Lseg)), - Inner::PathArray => &Kind::Array(Type(Inner::Path)), - Inner::BoxArray => &Kind::Array(Type(Inner::Box)), - Inner::Float4Array => &Kind::Array(Type(Inner::Float4)), - Inner::Float8Array => &Kind::Array(Type(Inner::Float8)), - Inner::PolygonArray => &Kind::Array(Type(Inner::Polygon)), - Inner::OidArray => &Kind::Array(Type(Inner::Oid)), - Inner::Aclitem => &Kind::Simple, - Inner::AclitemArray => &Kind::Array(Type(Inner::Aclitem)), - Inner::MacaddrArray => &Kind::Array(Type(Inner::Macaddr)), - Inner::InetArray => &Kind::Array(Type(Inner::Inet)), - Inner::Bpchar => &Kind::Simple, - Inner::Varchar => &Kind::Simple, - Inner::Date => &Kind::Simple, - Inner::Time => &Kind::Simple, - Inner::Timestamp => &Kind::Simple, - Inner::TimestampArray => &Kind::Array(Type(Inner::Timestamp)), - 
Inner::DateArray => &Kind::Array(Type(Inner::Date)), - Inner::TimeArray => &Kind::Array(Type(Inner::Time)), - Inner::Timestamptz => &Kind::Simple, - Inner::TimestamptzArray => &Kind::Array(Type(Inner::Timestamptz)), - Inner::Interval => &Kind::Simple, - Inner::IntervalArray => &Kind::Array(Type(Inner::Interval)), - Inner::NumericArray => &Kind::Array(Type(Inner::Numeric)), - Inner::CstringArray => &Kind::Array(Type(Inner::Cstring)), - Inner::Timetz => &Kind::Simple, - Inner::TimetzArray => &Kind::Array(Type(Inner::Timetz)), - Inner::Bit => &Kind::Simple, - Inner::BitArray => &Kind::Array(Type(Inner::Bit)), - Inner::Varbit => &Kind::Simple, - Inner::VarbitArray => &Kind::Array(Type(Inner::Varbit)), - Inner::Numeric => &Kind::Simple, - Inner::Refcursor => &Kind::Simple, - Inner::RefcursorArray => &Kind::Array(Type(Inner::Refcursor)), - Inner::Regprocedure => &Kind::Simple, - Inner::Regoper => &Kind::Simple, - Inner::Regoperator => &Kind::Simple, - Inner::Regclass => &Kind::Simple, - Inner::Regtype => &Kind::Simple, - Inner::RegprocedureArray => &Kind::Array(Type(Inner::Regprocedure)), - Inner::RegoperArray => &Kind::Array(Type(Inner::Regoper)), - Inner::RegoperatorArray => &Kind::Array(Type(Inner::Regoperator)), - Inner::RegclassArray => &Kind::Array(Type(Inner::Regclass)), - Inner::RegtypeArray => &Kind::Array(Type(Inner::Regtype)), - Inner::Record => &Kind::Pseudo, - Inner::Cstring => &Kind::Pseudo, - Inner::Any => &Kind::Pseudo, - Inner::Anyarray => &Kind::Pseudo, - Inner::Void => &Kind::Pseudo, - Inner::Trigger => &Kind::Pseudo, - Inner::LanguageHandler => &Kind::Pseudo, - Inner::Internal => &Kind::Pseudo, - Inner::Anyelement => &Kind::Pseudo, - Inner::RecordArray => &Kind::Pseudo, - Inner::Anynonarray => &Kind::Pseudo, - Inner::TxidSnapshotArray => &Kind::Array(Type(Inner::TxidSnapshot)), - Inner::Uuid => &Kind::Simple, - Inner::UuidArray => &Kind::Array(Type(Inner::Uuid)), - Inner::TxidSnapshot => &Kind::Simple, - Inner::FdwHandler => &Kind::Pseudo, - Inner::PgLsn => &Kind::Simple, - Inner::PgLsnArray => &Kind::Array(Type(Inner::PgLsn)), - Inner::TsmHandler => &Kind::Pseudo, - Inner::PgNdistinct => &Kind::Simple, - Inner::PgDependencies => &Kind::Simple, - Inner::Anyenum => &Kind::Pseudo, - Inner::TsVector => &Kind::Simple, - Inner::Tsquery => &Kind::Simple, - Inner::GtsVector => &Kind::Simple, - Inner::TsVectorArray => &Kind::Array(Type(Inner::TsVector)), - Inner::GtsVectorArray => &Kind::Array(Type(Inner::GtsVector)), - Inner::TsqueryArray => &Kind::Array(Type(Inner::Tsquery)), - Inner::Regconfig => &Kind::Simple, - Inner::RegconfigArray => &Kind::Array(Type(Inner::Regconfig)), - Inner::Regdictionary => &Kind::Simple, - Inner::RegdictionaryArray => &Kind::Array(Type(Inner::Regdictionary)), - Inner::Jsonb => &Kind::Simple, - Inner::JsonbArray => &Kind::Array(Type(Inner::Jsonb)), - Inner::AnyRange => &Kind::Pseudo, - Inner::EventTrigger => &Kind::Pseudo, - Inner::Int4Range => &Kind::Range(Type(Inner::Int4)), - Inner::Int4RangeArray => &Kind::Array(Type(Inner::Int4Range)), - Inner::NumRange => &Kind::Range(Type(Inner::Numeric)), - Inner::NumRangeArray => &Kind::Array(Type(Inner::NumRange)), - Inner::TsRange => &Kind::Range(Type(Inner::Timestamp)), - Inner::TsRangeArray => &Kind::Array(Type(Inner::TsRange)), - Inner::TstzRange => &Kind::Range(Type(Inner::Timestamptz)), - Inner::TstzRangeArray => &Kind::Array(Type(Inner::TstzRange)), - Inner::DateRange => &Kind::Range(Type(Inner::Date)), - Inner::DateRangeArray => &Kind::Array(Type(Inner::DateRange)), - Inner::Int8Range => 
&Kind::Range(Type(Inner::Int8)), - Inner::Int8RangeArray => &Kind::Array(Type(Inner::Int8Range)), - Inner::Jsonpath => &Kind::Simple, - Inner::JsonpathArray => &Kind::Array(Type(Inner::Jsonpath)), - Inner::Regnamespace => &Kind::Simple, - Inner::RegnamespaceArray => &Kind::Array(Type(Inner::Regnamespace)), - Inner::Regrole => &Kind::Simple, - Inner::RegroleArray => &Kind::Array(Type(Inner::Regrole)), - Inner::Regcollation => &Kind::Simple, - Inner::RegcollationArray => &Kind::Array(Type(Inner::Regcollation)), - Inner::PgMcvList => &Kind::Simple, - Inner::PgSnapshot => &Kind::Simple, - Inner::PgSnapshotArray => &Kind::Array(Type(Inner::PgSnapshot)), - Inner::Xid8 => &Kind::Simple, - Inner::Anycompatible => &Kind::Pseudo, - Inner::Anycompatiblearray => &Kind::Pseudo, - Inner::Anycompatiblenonarray => &Kind::Pseudo, - Inner::AnycompatibleRange => &Kind::Pseudo, + Inner::Bool => { + &Kind::Simple + } + Inner::Bytea => { + &Kind::Simple + } + Inner::Char => { + &Kind::Simple + } + Inner::Name => { + &Kind::Simple + } + Inner::Int8 => { + &Kind::Simple + } + Inner::Int2 => { + &Kind::Simple + } + Inner::Int2Vector => { + &Kind::Array(Type(Inner::Int2)) + } + Inner::Int4 => { + &Kind::Simple + } + Inner::Regproc => { + &Kind::Simple + } + Inner::Text => { + &Kind::Simple + } + Inner::Oid => { + &Kind::Simple + } + Inner::Tid => { + &Kind::Simple + } + Inner::Xid => { + &Kind::Simple + } + Inner::Cid => { + &Kind::Simple + } + Inner::OidVector => { + &Kind::Array(Type(Inner::Oid)) + } + Inner::PgDdlCommand => { + &Kind::Pseudo + } + Inner::Json => { + &Kind::Simple + } + Inner::Xml => { + &Kind::Simple + } + Inner::XmlArray => { + &Kind::Array(Type(Inner::Xml)) + } + Inner::PgNodeTree => { + &Kind::Simple + } + Inner::JsonArray => { + &Kind::Array(Type(Inner::Json)) + } + Inner::TableAmHandler => { + &Kind::Pseudo + } + Inner::Xid8Array => { + &Kind::Array(Type(Inner::Xid8)) + } + Inner::IndexAmHandler => { + &Kind::Pseudo + } + Inner::Point => { + &Kind::Simple + } + Inner::Lseg => { + &Kind::Simple + } + Inner::Path => { + &Kind::Simple + } + Inner::Box => { + &Kind::Simple + } + Inner::Polygon => { + &Kind::Simple + } + Inner::Line => { + &Kind::Simple + } + Inner::LineArray => { + &Kind::Array(Type(Inner::Line)) + } + Inner::Cidr => { + &Kind::Simple + } + Inner::CidrArray => { + &Kind::Array(Type(Inner::Cidr)) + } + Inner::Float4 => { + &Kind::Simple + } + Inner::Float8 => { + &Kind::Simple + } + Inner::Unknown => { + &Kind::Simple + } + Inner::Circle => { + &Kind::Simple + } + Inner::CircleArray => { + &Kind::Array(Type(Inner::Circle)) + } + Inner::Macaddr8 => { + &Kind::Simple + } + Inner::Macaddr8Array => { + &Kind::Array(Type(Inner::Macaddr8)) + } + Inner::Money => { + &Kind::Simple + } + Inner::MoneyArray => { + &Kind::Array(Type(Inner::Money)) + } + Inner::Macaddr => { + &Kind::Simple + } + Inner::Inet => { + &Kind::Simple + } + Inner::BoolArray => { + &Kind::Array(Type(Inner::Bool)) + } + Inner::ByteaArray => { + &Kind::Array(Type(Inner::Bytea)) + } + Inner::CharArray => { + &Kind::Array(Type(Inner::Char)) + } + Inner::NameArray => { + &Kind::Array(Type(Inner::Name)) + } + Inner::Int2Array => { + &Kind::Array(Type(Inner::Int2)) + } + Inner::Int2VectorArray => { + &Kind::Array(Type(Inner::Int2Vector)) + } + Inner::Int4Array => { + &Kind::Array(Type(Inner::Int4)) + } + Inner::RegprocArray => { + &Kind::Array(Type(Inner::Regproc)) + } + Inner::TextArray => { + &Kind::Array(Type(Inner::Text)) + } + Inner::TidArray => { + &Kind::Array(Type(Inner::Tid)) + } + Inner::XidArray => { + 
&Kind::Array(Type(Inner::Xid)) + } + Inner::CidArray => { + &Kind::Array(Type(Inner::Cid)) + } + Inner::OidVectorArray => { + &Kind::Array(Type(Inner::OidVector)) + } + Inner::BpcharArray => { + &Kind::Array(Type(Inner::Bpchar)) + } + Inner::VarcharArray => { + &Kind::Array(Type(Inner::Varchar)) + } + Inner::Int8Array => { + &Kind::Array(Type(Inner::Int8)) + } + Inner::PointArray => { + &Kind::Array(Type(Inner::Point)) + } + Inner::LsegArray => { + &Kind::Array(Type(Inner::Lseg)) + } + Inner::PathArray => { + &Kind::Array(Type(Inner::Path)) + } + Inner::BoxArray => { + &Kind::Array(Type(Inner::Box)) + } + Inner::Float4Array => { + &Kind::Array(Type(Inner::Float4)) + } + Inner::Float8Array => { + &Kind::Array(Type(Inner::Float8)) + } + Inner::PolygonArray => { + &Kind::Array(Type(Inner::Polygon)) + } + Inner::OidArray => { + &Kind::Array(Type(Inner::Oid)) + } + Inner::Aclitem => { + &Kind::Simple + } + Inner::AclitemArray => { + &Kind::Array(Type(Inner::Aclitem)) + } + Inner::MacaddrArray => { + &Kind::Array(Type(Inner::Macaddr)) + } + Inner::InetArray => { + &Kind::Array(Type(Inner::Inet)) + } + Inner::Bpchar => { + &Kind::Simple + } + Inner::Varchar => { + &Kind::Simple + } + Inner::Date => { + &Kind::Simple + } + Inner::Time => { + &Kind::Simple + } + Inner::Timestamp => { + &Kind::Simple + } + Inner::TimestampArray => { + &Kind::Array(Type(Inner::Timestamp)) + } + Inner::DateArray => { + &Kind::Array(Type(Inner::Date)) + } + Inner::TimeArray => { + &Kind::Array(Type(Inner::Time)) + } + Inner::Timestamptz => { + &Kind::Simple + } + Inner::TimestamptzArray => { + &Kind::Array(Type(Inner::Timestamptz)) + } + Inner::Interval => { + &Kind::Simple + } + Inner::IntervalArray => { + &Kind::Array(Type(Inner::Interval)) + } + Inner::NumericArray => { + &Kind::Array(Type(Inner::Numeric)) + } + Inner::CstringArray => { + &Kind::Array(Type(Inner::Cstring)) + } + Inner::Timetz => { + &Kind::Simple + } + Inner::TimetzArray => { + &Kind::Array(Type(Inner::Timetz)) + } + Inner::Bit => { + &Kind::Simple + } + Inner::BitArray => { + &Kind::Array(Type(Inner::Bit)) + } + Inner::Varbit => { + &Kind::Simple + } + Inner::VarbitArray => { + &Kind::Array(Type(Inner::Varbit)) + } + Inner::Numeric => { + &Kind::Simple + } + Inner::Refcursor => { + &Kind::Simple + } + Inner::RefcursorArray => { + &Kind::Array(Type(Inner::Refcursor)) + } + Inner::Regprocedure => { + &Kind::Simple + } + Inner::Regoper => { + &Kind::Simple + } + Inner::Regoperator => { + &Kind::Simple + } + Inner::Regclass => { + &Kind::Simple + } + Inner::Regtype => { + &Kind::Simple + } + Inner::RegprocedureArray => { + &Kind::Array(Type(Inner::Regprocedure)) + } + Inner::RegoperArray => { + &Kind::Array(Type(Inner::Regoper)) + } + Inner::RegoperatorArray => { + &Kind::Array(Type(Inner::Regoperator)) + } + Inner::RegclassArray => { + &Kind::Array(Type(Inner::Regclass)) + } + Inner::RegtypeArray => { + &Kind::Array(Type(Inner::Regtype)) + } + Inner::Record => { + &Kind::Pseudo + } + Inner::Cstring => { + &Kind::Pseudo + } + Inner::Any => { + &Kind::Pseudo + } + Inner::Anyarray => { + &Kind::Pseudo + } + Inner::Void => { + &Kind::Pseudo + } + Inner::Trigger => { + &Kind::Pseudo + } + Inner::LanguageHandler => { + &Kind::Pseudo + } + Inner::Internal => { + &Kind::Pseudo + } + Inner::Anyelement => { + &Kind::Pseudo + } + Inner::RecordArray => { + &Kind::Pseudo + } + Inner::Anynonarray => { + &Kind::Pseudo + } + Inner::TxidSnapshotArray => { + &Kind::Array(Type(Inner::TxidSnapshot)) + } + Inner::Uuid => { + &Kind::Simple + } + Inner::UuidArray => { + 
&Kind::Array(Type(Inner::Uuid)) + } + Inner::TxidSnapshot => { + &Kind::Simple + } + Inner::FdwHandler => { + &Kind::Pseudo + } + Inner::PgLsn => { + &Kind::Simple + } + Inner::PgLsnArray => { + &Kind::Array(Type(Inner::PgLsn)) + } + Inner::TsmHandler => { + &Kind::Pseudo + } + Inner::PgNdistinct => { + &Kind::Simple + } + Inner::PgDependencies => { + &Kind::Simple + } + Inner::Anyenum => { + &Kind::Pseudo + } + Inner::TsVector => { + &Kind::Simple + } + Inner::Tsquery => { + &Kind::Simple + } + Inner::GtsVector => { + &Kind::Simple + } + Inner::TsVectorArray => { + &Kind::Array(Type(Inner::TsVector)) + } + Inner::GtsVectorArray => { + &Kind::Array(Type(Inner::GtsVector)) + } + Inner::TsqueryArray => { + &Kind::Array(Type(Inner::Tsquery)) + } + Inner::Regconfig => { + &Kind::Simple + } + Inner::RegconfigArray => { + &Kind::Array(Type(Inner::Regconfig)) + } + Inner::Regdictionary => { + &Kind::Simple + } + Inner::RegdictionaryArray => { + &Kind::Array(Type(Inner::Regdictionary)) + } + Inner::Jsonb => { + &Kind::Simple + } + Inner::JsonbArray => { + &Kind::Array(Type(Inner::Jsonb)) + } + Inner::AnyRange => { + &Kind::Pseudo + } + Inner::EventTrigger => { + &Kind::Pseudo + } + Inner::Int4Range => { + &Kind::Range(Type(Inner::Int4)) + } + Inner::Int4RangeArray => { + &Kind::Array(Type(Inner::Int4Range)) + } + Inner::NumRange => { + &Kind::Range(Type(Inner::Numeric)) + } + Inner::NumRangeArray => { + &Kind::Array(Type(Inner::NumRange)) + } + Inner::TsRange => { + &Kind::Range(Type(Inner::Timestamp)) + } + Inner::TsRangeArray => { + &Kind::Array(Type(Inner::TsRange)) + } + Inner::TstzRange => { + &Kind::Range(Type(Inner::Timestamptz)) + } + Inner::TstzRangeArray => { + &Kind::Array(Type(Inner::TstzRange)) + } + Inner::DateRange => { + &Kind::Range(Type(Inner::Date)) + } + Inner::DateRangeArray => { + &Kind::Array(Type(Inner::DateRange)) + } + Inner::Int8Range => { + &Kind::Range(Type(Inner::Int8)) + } + Inner::Int8RangeArray => { + &Kind::Array(Type(Inner::Int8Range)) + } + Inner::Jsonpath => { + &Kind::Simple + } + Inner::JsonpathArray => { + &Kind::Array(Type(Inner::Jsonpath)) + } + Inner::Regnamespace => { + &Kind::Simple + } + Inner::RegnamespaceArray => { + &Kind::Array(Type(Inner::Regnamespace)) + } + Inner::Regrole => { + &Kind::Simple + } + Inner::RegroleArray => { + &Kind::Array(Type(Inner::Regrole)) + } + Inner::Regcollation => { + &Kind::Simple + } + Inner::RegcollationArray => { + &Kind::Array(Type(Inner::Regcollation)) + } + Inner::Int4multiRange => { + &Kind::Range(Type(Inner::Int4)) + } + Inner::NummultiRange => { + &Kind::Range(Type(Inner::Numeric)) + } + Inner::TsmultiRange => { + &Kind::Range(Type(Inner::Timestamp)) + } + Inner::TstzmultiRange => { + &Kind::Range(Type(Inner::Timestamptz)) + } + Inner::DatemultiRange => { + &Kind::Range(Type(Inner::Date)) + } + Inner::Int8multiRange => { + &Kind::Range(Type(Inner::Int8)) + } + Inner::AnymultiRange => { + &Kind::Pseudo + } + Inner::AnycompatiblemultiRange => { + &Kind::Pseudo + } + Inner::PgBrinBloomSummary => { + &Kind::Simple + } + Inner::PgBrinMinmaxMultiSummary => { + &Kind::Simple + } + Inner::PgMcvList => { + &Kind::Simple + } + Inner::PgSnapshot => { + &Kind::Simple + } + Inner::PgSnapshotArray => { + &Kind::Array(Type(Inner::PgSnapshot)) + } + Inner::Xid8 => { + &Kind::Simple + } + Inner::Anycompatible => { + &Kind::Pseudo + } + Inner::Anycompatiblearray => { + &Kind::Pseudo + } + Inner::Anycompatiblenonarray => { + &Kind::Pseudo + } + Inner::AnycompatibleRange => { + &Kind::Pseudo + } + Inner::Int4multiRangeArray => { + 
&Kind::Array(Type(Inner::Int4multiRange)) + } + Inner::NummultiRangeArray => { + &Kind::Array(Type(Inner::NummultiRange)) + } + Inner::TsmultiRangeArray => { + &Kind::Array(Type(Inner::TsmultiRange)) + } + Inner::TstzmultiRangeArray => { + &Kind::Array(Type(Inner::TstzmultiRange)) + } + Inner::DatemultiRangeArray => { + &Kind::Array(Type(Inner::DatemultiRange)) + } + Inner::Int8multiRangeArray => { + &Kind::Array(Type(Inner::Int8multiRange)) + } Inner::Other(ref u) => &u.kind, } } @@ -874,6 +1308,16 @@ impl Inner { Inner::RegroleArray => "_regrole", Inner::Regcollation => "regcollation", Inner::RegcollationArray => "_regcollation", + Inner::Int4multiRange => "int4multirange", + Inner::NummultiRange => "nummultirange", + Inner::TsmultiRange => "tsmultirange", + Inner::TstzmultiRange => "tstzmultirange", + Inner::DatemultiRange => "datemultirange", + Inner::Int8multiRange => "int8multirange", + Inner::AnymultiRange => "anymultirange", + Inner::AnycompatiblemultiRange => "anycompatiblemultirange", + Inner::PgBrinBloomSummary => "pg_brin_bloom_summary", + Inner::PgBrinMinmaxMultiSummary => "pg_brin_minmax_multi_summary", Inner::PgMcvList => "pg_mcv_list", Inner::PgSnapshot => "pg_snapshot", Inner::PgSnapshotArray => "_pg_snapshot", @@ -882,6 +1326,12 @@ impl Inner { Inner::Anycompatiblearray => "anycompatiblearray", Inner::Anycompatiblenonarray => "anycompatiblenonarray", Inner::AnycompatibleRange => "anycompatiblerange", + Inner::Int4multiRangeArray => "_int4multirange", + Inner::NummultiRangeArray => "_nummultirange", + Inner::TsmultiRangeArray => "_tsmultirange", + Inner::TstzmultiRangeArray => "_tstzmultirange", + Inner::DatemultiRangeArray => "_datemultirange", + Inner::Int8multiRangeArray => "_int8multirange", Inner::Other(ref u) => &u.name, } } @@ -1370,6 +1820,36 @@ impl Type { /// REGCOLLATION[] pub const REGCOLLATION_ARRAY: Type = Type(Inner::RegcollationArray); + /// INT4MULTIRANGE - multirange of integers + pub const INT4MULTI_RANGE: Type = Type(Inner::Int4multiRange); + + /// NUMMULTIRANGE - multirange of numerics + pub const NUMMULTI_RANGE: Type = Type(Inner::NummultiRange); + + /// TSMULTIRANGE - multirange of timestamps without time zone + pub const TSMULTI_RANGE: Type = Type(Inner::TsmultiRange); + + /// TSTZMULTIRANGE - multirange of timestamps with time zone + pub const TSTZMULTI_RANGE: Type = Type(Inner::TstzmultiRange); + + /// DATEMULTIRANGE - multirange of dates + pub const DATEMULTI_RANGE: Type = Type(Inner::DatemultiRange); + + /// INT8MULTIRANGE - multirange of bigints + pub const INT8MULTI_RANGE: Type = Type(Inner::Int8multiRange); + + /// ANYMULTIRANGE - pseudo-type representing a polymorphic base type that is a multirange + pub const ANYMULTI_RANGE: Type = Type(Inner::AnymultiRange); + + /// ANYCOMPATIBLEMULTIRANGE - pseudo-type representing a multirange over a polymorphic common type + pub const ANYCOMPATIBLEMULTI_RANGE: Type = Type(Inner::AnycompatiblemultiRange); + + /// PG_BRIN_BLOOM_SUMMARY - BRIN bloom summary + pub const PG_BRIN_BLOOM_SUMMARY: Type = Type(Inner::PgBrinBloomSummary); + + /// PG_BRIN_MINMAX_MULTI_SUMMARY - BRIN minmax-multi summary + pub const PG_BRIN_MINMAX_MULTI_SUMMARY: Type = Type(Inner::PgBrinMinmaxMultiSummary); + /// PG_MCV_LIST - multivariate MCV list pub const PG_MCV_LIST: Type = Type(Inner::PgMcvList); @@ -1393,4 +1873,22 @@ impl Type { /// ANYCOMPATIBLERANGE - pseudo-type representing a range over a polymorphic common type pub const ANYCOMPATIBLE_RANGE: Type = Type(Inner::AnycompatibleRange); -} + + /// INT4MULTIRANGE[] + pub const 
INT4MULTI_RANGE_ARRAY: Type = Type(Inner::Int4multiRangeArray); + + /// NUMMULTIRANGE[] + pub const NUMMULTI_RANGE_ARRAY: Type = Type(Inner::NummultiRangeArray); + + /// TSMULTIRANGE[] + pub const TSMULTI_RANGE_ARRAY: Type = Type(Inner::TsmultiRangeArray); + + /// TSTZMULTIRANGE[] + pub const TSTZMULTI_RANGE_ARRAY: Type = Type(Inner::TstzmultiRangeArray); + + /// DATEMULTIRANGE[] + pub const DATEMULTI_RANGE_ARRAY: Type = Type(Inner::DatemultiRangeArray); + + /// INT8MULTIRANGE[] + pub const INT8MULTI_RANGE_ARRAY: Type = Type(Inner::Int8multiRangeArray); +} \ No newline at end of file diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 4eb1ae2d0..6f191fc16 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -114,6 +114,7 @@ impl SqlState { Inner::E2203D => "2203D", Inner::E2203E => "2203E", Inner::E2203F => "2203F", + Inner::E2203G => "2203G", Inner::E23000 => "23000", Inner::E23001 => "23001", Inner::E23502 => "23502", @@ -278,7 +279,7 @@ impl SqlState { Inner::Other(code) => code, } } - + /// 00000 pub const SUCCESSFUL_COMPLETION: SqlState = SqlState(Inner::E00000); @@ -364,8 +365,7 @@ impl SqlState { pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Inner::E0Z000); /// 0Z002 - pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = - SqlState(Inner::E0Z002); + pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = SqlState(Inner::E0Z002); /// 20000 pub const CASE_NOT_FOUND: SqlState = SqlState(Inner::E20000); @@ -580,6 +580,9 @@ impl SqlState { /// 2203F pub const SQL_JSON_SCALAR_REQUIRED: SqlState = SqlState(Inner::E2203F); + /// 2203G + pub const SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE: SqlState = SqlState(Inner::E2203G); + /// 23000 pub const INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Inner::E23000); @@ -620,8 +623,7 @@ impl SqlState { pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25003); /// 25004 - pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Inner::E25004); + pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25004); /// 25005 pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25005); @@ -1178,6 +1180,7 @@ enum Inner { E2203D, E2203E, E2203F, + E2203G, E23000, E23001, E23502, @@ -1341,324 +1344,325 @@ enum Inner { EXX002, Other(Box), } - + #[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { key: 12913932095322966823, disps: &[ + (0, 24), (0, 12), - (0, 18), - (0, 25), - (0, 109), - (0, 147), (0, 74), - (0, 0), - (7, 117), - (5, 221), - (0, 26), - (1, 45), - (0, 93), - (0, 25), - (0, 61), - (1, 221), - (10, 17), - (0, 77), - (2, 3), - (0, 216), - (0, 0), - (0, 1), - (1, 168), - (0, 64), - (0, 2), - (0, 7), - (1, 37), - (0, 83), - (3, 24), - (0, 0), (0, 109), - (18, 9), - (1, 230), + (0, 11), + (0, 9), (0, 0), - (0, 4), + (4, 38), + (3, 155), + (0, 6), + (1, 242), + (0, 66), + (0, 53), + (5, 180), + (3, 221), + (7, 230), + (0, 125), + (1, 46), + (0, 11), + (1, 2), + (0, 5), + (0, 13), (0, 171), + (0, 15), + (0, 4), + (0, 22), + (1, 85), + (0, 75), + (2, 0), + (1, 25), + (7, 47), + (0, 45), + (0, 35), + (0, 7), + (7, 124), (0, 0), - (34, 97), - (2, 126), - (44, 49), - (5, 182), - (0, 1), + (14, 104), + (1, 183), + (61, 50), + (3, 76), + (0, 12), + (0, 7), + (4, 189), (0, 1), - (0, 71), - (0, 4), - (5, 164), + (64, 102), (0, 0), - (0, 96), - (13, 58), - (0, 
58), - (0, 242), - (0, 72), - (16, 53), + (16, 192), + (24, 19), + (0, 5), + (0, 87), + (0, 89), + (0, 14), ], entries: &[ - ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ("42703", SqlState::UNDEFINED_COLUMN), - ("42P07", SqlState::DUPLICATE_TABLE), - ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("22000", SqlState::DATA_EXCEPTION), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("2200N", SqlState::INVALID_XML_CONTENT), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("28P01", SqlState::INVALID_PASSWORD), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("02000", SqlState::NO_DATA), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), - ("42601", SqlState::SYNTAX_ERROR), - ("22012", SqlState::DIVISION_BY_ZERO), - ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), - ("P0003", SqlState::TOO_MANY_ROWS), - ("57P04", SqlState::DATABASE_DROPPED), - ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), + ("42P09", SqlState::AMBIGUOUS_ALIAS), + ("F0000", SqlState::CONFIG_FILE_ERROR), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("20000", SqlState::CASE_NOT_FOUND), + ("08000", SqlState::CONNECTION_EXCEPTION), + ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), + ("42710", SqlState::DUPLICATE_OBJECT), + ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("42939", SqlState::RESERVED_NAME), - ("42602", SqlState::INVALID_NAME), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), + ("22022", SqlState::INDICATOR_OVERFLOW), + ("55006", SqlState::OBJECT_IN_USE), + ("53200", SqlState::OUT_OF_MEMORY), + ("22012", SqlState::DIVISION_BY_ZERO), + ("P0002", SqlState::NO_DATA_FOUND), + ("XX001", SqlState::DATA_CORRUPTED), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), + ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), + ("25000", SqlState::INVALID_TRANSACTION_STATE), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), + ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), + ("42804", SqlState::DATATYPE_MISMATCH), + ("42803", SqlState::GROUPING_ERROR), + ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), + ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), + ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), + ("22P01", 
SqlState::FLOATING_POINT_EXCEPTION), + ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), + ("42723", SqlState::DUPLICATE_FUNCTION), + ("21000", SqlState::CARDINALITY_VIOLATION), + ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("23505", SqlState::UNIQUE_VIOLATION), + ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("23P01", SqlState::EXCLUSION_VIOLATION), + ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), + ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), + ("P0000", SqlState::PLPGSQL_ERROR), ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), - ("53100", SqlState::DISK_FULL), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), + ("0A000", SqlState::FEATURE_NOT_SUPPORTED), + ("24000", SqlState::INVALID_CURSOR_STATE), + ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("42712", SqlState::DUPLICATE_ALIAS), + ("HV014", SqlState::FDW_TOO_MANY_HANDLES), + ("58030", SqlState::IO_ERROR), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), + ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), + ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), + ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), + ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), + ("20000", SqlState::CASE_NOT_FOUND), + ("2203G", SqlState::SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE), + ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), + ("22007", SqlState::INVALID_DATETIME_FORMAT), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("42712", SqlState::DUPLICATE_ALIAS), - ("42622", SqlState::NAME_TOO_LONG), - ("22035", SqlState::NO_SQL_JSON_ITEM), - ("42P18", SqlState::INDETERMINATE_DATATYPE), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("01000", SqlState::WARNING), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("22023", SqlState::INVALID_PARAMETER_VALUE), - ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), - ("57P01", SqlState::ADMIN_SHUTDOWN), - ("2202E", SqlState::ARRAY_ELEMENT_ERROR), + ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), + ("P0004", SqlState::ASSERT_FAILURE), ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("57000", SqlState::OPERATOR_INTERVENTION), - ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("22032", SqlState::INVALID_JSON_TEXT), - ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), - ("2201F", 
SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), - ("428C9", SqlState::GENERATED_ALWAYS), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), - ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("42846", SqlState::CANNOT_COERCE), - ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("42704", SqlState::UNDEFINED_OBJECT), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), - ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("0L000", SqlState::INVALID_GRANTOR), + ("22P04", SqlState::BAD_COPY_FILE_FORMAT), + ("22031", SqlState::INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), + ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), + ("58P02", SqlState::DUPLICATE_FILE), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), ("54001", SqlState::STATEMENT_TOO_COMPLEX), - ("53200", SqlState::OUT_OF_MEMORY), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("22022", SqlState::INDICATOR_OVERFLOW), - ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), - ("XX000", SqlState::INTERNAL_ERROR), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("23505", SqlState::UNIQUE_VIOLATION), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("F0000", SqlState::CONFIG_FILE_ERROR), + ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), + ("22008", SqlState::DATETIME_FIELD_OVERFLOW), + ("42P06", SqlState::DUPLICATE_SCHEMA), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), + ("42P20", SqlState::WINDOWING_ERROR), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), + ("42702", SqlState::AMBIGUOUS_COLUMN), + ("02000", SqlState::NO_DATA), ("54011", SqlState::TOO_MANY_COLUMNS), - ("XX002", SqlState::INDEX_CORRUPTED), - ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), + ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), + ("42701", SqlState::DUPLICATE_COLUMN), + ("08P01", SqlState::PROTOCOL_VIOLATION), + ("42622", SqlState::NAME_TOO_LONG), + ("P0003", SqlState::TOO_MANY_ROWS), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("42P03", SqlState::DUPLICATE_CURSOR), + ("23001", SqlState::RESTRICT_VIOLATION), + ("57000", SqlState::OPERATOR_INTERVENTION), + ("22027", SqlState::TRIM_ERROR), ("42P12", SqlState::INVALID_DATABASE_DEFINITION), + ("3B000", SqlState::SAVEPOINT_EXCEPTION), + ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), + ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), + ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("428C9", SqlState::GENERATED_ALWAYS), + ("2200S", SqlState::INVALID_XML_COMMENT), + ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), + ("42809", SqlState::WRONG_OBJECT_TYPE), + ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), + ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), + ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), + ("0P000", 
SqlState::INVALID_ROLE_SPECIFICATION), + ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), + ("53100", SqlState::DISK_FULL), + ("42601", SqlState::SYNTAX_ERROR), + ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), + ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("55006", SqlState::OBJECT_IN_USE), - ("42P01", SqlState::UNDEFINED_TABLE), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("57P05", SqlState::IDLE_SESSION_TIMEOUT), + ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("01000", SqlState::WARNING), + ("42883", SqlState::UNDEFINED_FUNCTION), + ("57P01", SqlState::ADMIN_SHUTDOWN), ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), - ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), - ("P0004", SqlState::ASSERT_FAILURE), - ("58000", SqlState::SYSTEM_ERROR), - ("42P21", SqlState::COLLATION_MISMATCH), - ("57P02", SqlState::CRASH_SHUTDOWN), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), - ("28P01", SqlState::INVALID_PASSWORD), - ("22011", SqlState::SUBSTRING_ERROR), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("08006", SqlState::CONNECTION_FAILURE), - ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("42803", SqlState::GROUPING_ERROR), ("00000", SqlState::SUCCESSFUL_COMPLETION), + ("55P03", SqlState::LOCK_NOT_AVAILABLE), + ("42P01", SqlState::UNDEFINED_TABLE), + ("42830", SqlState::INVALID_FOREIGN_KEY), + ("22005", SqlState::ERROR_IN_ASSIGNMENT), + ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), + ("XX002", SqlState::INDEX_CORRUPTED), ("42P16", SqlState::INVALID_TABLE_DEFINITION), - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("57P03", SqlState::CANNOT_CONNECT_NOW), + ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("22019", SqlState::INVALID_ESCAPE_CHARACTER), + ("P0001", SqlState::RAISE_EXCEPTION), + ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), + ("57P02", SqlState::CRASH_SHUTDOWN), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), + ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("40000", SqlState::TRANSACTION_ROLLBACK), + ("22032", SqlState::INVALID_JSON_TEXT), + ("2202E", SqlState::ARRAY_ELEMENT_ERROR), + ("42P19", SqlState::INVALID_RECURSION), + ("42611", SqlState::INVALID_COLUMN_DEFINITION), + ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("XX000", SqlState::INTERNAL_ERROR), + ("08006", SqlState::CONNECTION_FAILURE), + ("57P04", SqlState::DATABASE_DROPPED), + ("42P07", SqlState::DUPLICATE_TABLE), + ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("22035", SqlState::NO_SQL_JSON_ITEM), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("42P21", SqlState::COLLATION_MISMATCH), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("0F000", SqlState::LOCATOR_EXCEPTION), + ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("22011", 
SqlState::SUBSTRING_ERROR), + ("42602", SqlState::INVALID_NAME), ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), - ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("2200S", SqlState::INVALID_XML_COMMENT), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), - ("42P03", SqlState::DUPLICATE_CURSOR), + ("42P02", SqlState::UNDEFINED_PARAMETER), + ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("0L000", SqlState::INVALID_GRANTOR), - ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), - ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), + ("0F001", SqlState::L_E_INVALID_SPECIFICATION), ("58P01", SqlState::UNDEFINED_FILE), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("22000", SqlState::DATA_EXCEPTION), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("42P19", SqlState::INVALID_RECURSION), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), + ("42703", SqlState::UNDEFINED_COLUMN), + ("57P05", SqlState::IDLE_SESSION_TIMEOUT), + ("57P03", SqlState::CANNOT_CONNECT_NOW), + ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), + ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), + ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), + ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("F0001", SqlState::LOCK_FILE_EXISTS), + ("42P22", SqlState::INDETERMINATE_COLLATION), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), + ("23514", SqlState::CHECK_VIOLATION), + ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), + ("54023", SqlState::TOO_MANY_ARGUMENTS), + ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), - ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), - ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("53000", SqlState::INSUFFICIENT_RESOURCES), + ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("P0000", SqlState::PLPGSQL_ERROR), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), - ("42710", SqlState::DUPLICATE_OBJECT), - ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), + ("3F000", SqlState::INVALID_SCHEMA_NAME), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + ("22026", 
SqlState::STRING_DATA_LENGTH_MISMATCH), + ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), ("HV000", SqlState::FDW_ERROR), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), - ("2200N", SqlState::INVALID_XML_CONTENT), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("42P09", SqlState::AMBIGUOUS_ALIAS), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), + ("34000", SqlState::INVALID_CURSOR_NAME), + ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), + ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("23502", SqlState::NOT_NULL_VIOLATION), - ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), - ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("57014", SqlState::QUERY_CANCELED), - ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), - ("42701", SqlState::DUPLICATE_COLUMN), - ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), - ("23P01", SqlState::EXCLUSION_VIOLATION), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), - ("42P04", SqlState::DUPLICATE_DATABASE), - ("22027", SqlState::TRIM_ERROR), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("P0001", SqlState::RAISE_EXCEPTION), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), ("3D000", SqlState::INVALID_CATALOG_NAME), - ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("08000", SqlState::CONNECTION_EXCEPTION), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("XX001", SqlState::DATA_CORRUPTED), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), - ("23514", SqlState::CHECK_VIOLATION), - ("42809", SqlState::WRONG_OBJECT_TYPE), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("42P22", SqlState::INDETERMINATE_COLLATION), - ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), - ("23001", 
SqlState::RESTRICT_VIOLATION), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("22031", SqlState::INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("58P02", SqlState::DUPLICATE_FILE), + ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("2200L", SqlState::NOT_AN_XML_DOCUMENT), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("42939", SqlState::RESERVED_NAME), + ("58000", SqlState::SYSTEM_ERROR), + ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("57014", SqlState::QUERY_CANCELED), + ("23502", SqlState::NOT_NULL_VIOLATION), + ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), + ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("42P20", SqlState::WINDOWING_ERROR), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("58030", SqlState::IO_ERROR), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), - ("P0002", SqlState::NO_DATA_FOUND), - ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), + ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("08P01", SqlState::PROTOCOL_VIOLATION), - ("42723", SqlState::DUPLICATE_FUNCTION), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), - ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("42804", SqlState::DATATYPE_MISMATCH), + ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("HV00K", SqlState::FDW_REPLY_HANDLE), + ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), + ("2200D", SqlState::INVALID_ESCAPE_OCTET), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("42725", SqlState::AMBIGUOUS_FUNCTION), + ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), + ("42846", SqlState::CANNOT_COERCE), + ("42P04", SqlState::DUPLICATE_DATABASE), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("42704", SqlState::UNDEFINED_OBJECT), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("53000", SqlState::INSUFFICIENT_RESOURCES), ], }; From 0c86f9362fe98a584fb80828650b22417bb661a2 Mon Sep 17 00:00:00 2001 From: Joseph Koshakow Date: Mon, 21 Nov 2022 16:52:58 -0500 Subject: [PATCH 625/819] Add multirange kind and run cargo fmt --- codegen/src/type_gen.rs | 24 +- postgres-types/src/lib.rs | 2 + postgres-types/src/type_gen.rs | 744 
+++++++-------------------- tokio-postgres/src/error/sqlstate.rs | 10 +- 4 files changed, 216 insertions(+), 564 deletions(-) diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 249c5530a..fd7a56450 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -17,6 +17,7 @@ struct Type { variant: String, ident: String, kind: String, + typtype: Option, element: u32, doc: String, } @@ -217,12 +218,18 @@ fn parse_types() -> BTreeMap { continue; } + let typtype = raw_type.get("typtype").cloned(); + let element = match &*kind { - "R" => match &*raw_type["typtype"] { + "R" => match typtype + .as_ref() + .expect("range type must have typtype") + .as_str() + { "r" => range_elements[&oid], "m" => multi_range_elements[&oid], typtype => panic!("invalid range typtype {}", typtype), - } + }, "A" => oids_by_name[&raw_type["typelem"]], _ => 0, }; @@ -248,6 +255,7 @@ fn parse_types() -> BTreeMap { variant, ident, kind: "A".to_string(), + typtype: None, element: oid, doc, }; @@ -259,6 +267,7 @@ fn parse_types() -> BTreeMap { variant, ident, kind, + typtype, element, doc, }; @@ -362,7 +371,16 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { let kind = match &*type_.kind { "P" => "Pseudo".to_owned(), "A" => format!("Array(Type(Inner::{}))", types[&type_.element].variant), - "R" => format!("Range(Type(Inner::{}))", types[&type_.element].variant), + "R" => match type_ + .typtype + .as_ref() + .expect("range type must have typtype") + .as_str() + { + "r" => format!("Range(Type(Inner::{}))", types[&type_.element].variant), + "m" => format!("Multirange(Type(Inner::{}))", types[&type_.element].variant), + typtype => panic!("invalid range typtype {}", typtype), + }, _ => "Simple".to_owned(), }; diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index ca4233f8a..fa49d99eb 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -320,6 +320,8 @@ pub enum Kind { Array(Type), /// A range type along with the type of its elements. Range(Type), + /// A multirange type along with the type of its elements. + Multirange(Type), /// A domain type along with its underlying type. Domain(Type), /// A composite type along with information about its fields. 
diff --git a/postgres-types/src/type_gen.rs b/postgres-types/src/type_gen.rs index 901fb46e0..a1bc3f85c 100644 --- a/postgres-types/src/type_gen.rs +++ b/postgres-types/src/type_gen.rs @@ -1,7 +1,7 @@ // Autogenerated file - DO NOT EDIT use std::sync::Arc; -use crate::{Type, Oid, Kind}; +use crate::{Kind, Oid, Type}; #[derive(PartialEq, Eq, Debug, Hash)] pub struct Other { @@ -586,561 +586,191 @@ impl Inner { pub fn kind(&self) -> &Kind { match *self { - Inner::Bool => { - &Kind::Simple - } - Inner::Bytea => { - &Kind::Simple - } - Inner::Char => { - &Kind::Simple - } - Inner::Name => { - &Kind::Simple - } - Inner::Int8 => { - &Kind::Simple - } - Inner::Int2 => { - &Kind::Simple - } - Inner::Int2Vector => { - &Kind::Array(Type(Inner::Int2)) - } - Inner::Int4 => { - &Kind::Simple - } - Inner::Regproc => { - &Kind::Simple - } - Inner::Text => { - &Kind::Simple - } - Inner::Oid => { - &Kind::Simple - } - Inner::Tid => { - &Kind::Simple - } - Inner::Xid => { - &Kind::Simple - } - Inner::Cid => { - &Kind::Simple - } - Inner::OidVector => { - &Kind::Array(Type(Inner::Oid)) - } - Inner::PgDdlCommand => { - &Kind::Pseudo - } - Inner::Json => { - &Kind::Simple - } - Inner::Xml => { - &Kind::Simple - } - Inner::XmlArray => { - &Kind::Array(Type(Inner::Xml)) - } - Inner::PgNodeTree => { - &Kind::Simple - } - Inner::JsonArray => { - &Kind::Array(Type(Inner::Json)) - } - Inner::TableAmHandler => { - &Kind::Pseudo - } - Inner::Xid8Array => { - &Kind::Array(Type(Inner::Xid8)) - } - Inner::IndexAmHandler => { - &Kind::Pseudo - } - Inner::Point => { - &Kind::Simple - } - Inner::Lseg => { - &Kind::Simple - } - Inner::Path => { - &Kind::Simple - } - Inner::Box => { - &Kind::Simple - } - Inner::Polygon => { - &Kind::Simple - } - Inner::Line => { - &Kind::Simple - } - Inner::LineArray => { - &Kind::Array(Type(Inner::Line)) - } - Inner::Cidr => { - &Kind::Simple - } - Inner::CidrArray => { - &Kind::Array(Type(Inner::Cidr)) - } - Inner::Float4 => { - &Kind::Simple - } - Inner::Float8 => { - &Kind::Simple - } - Inner::Unknown => { - &Kind::Simple - } - Inner::Circle => { - &Kind::Simple - } - Inner::CircleArray => { - &Kind::Array(Type(Inner::Circle)) - } - Inner::Macaddr8 => { - &Kind::Simple - } - Inner::Macaddr8Array => { - &Kind::Array(Type(Inner::Macaddr8)) - } - Inner::Money => { - &Kind::Simple - } - Inner::MoneyArray => { - &Kind::Array(Type(Inner::Money)) - } - Inner::Macaddr => { - &Kind::Simple - } - Inner::Inet => { - &Kind::Simple - } - Inner::BoolArray => { - &Kind::Array(Type(Inner::Bool)) - } - Inner::ByteaArray => { - &Kind::Array(Type(Inner::Bytea)) - } - Inner::CharArray => { - &Kind::Array(Type(Inner::Char)) - } - Inner::NameArray => { - &Kind::Array(Type(Inner::Name)) - } - Inner::Int2Array => { - &Kind::Array(Type(Inner::Int2)) - } - Inner::Int2VectorArray => { - &Kind::Array(Type(Inner::Int2Vector)) - } - Inner::Int4Array => { - &Kind::Array(Type(Inner::Int4)) - } - Inner::RegprocArray => { - &Kind::Array(Type(Inner::Regproc)) - } - Inner::TextArray => { - &Kind::Array(Type(Inner::Text)) - } - Inner::TidArray => { - &Kind::Array(Type(Inner::Tid)) - } - Inner::XidArray => { - &Kind::Array(Type(Inner::Xid)) - } - Inner::CidArray => { - &Kind::Array(Type(Inner::Cid)) - } - Inner::OidVectorArray => { - &Kind::Array(Type(Inner::OidVector)) - } - Inner::BpcharArray => { - &Kind::Array(Type(Inner::Bpchar)) - } - Inner::VarcharArray => { - &Kind::Array(Type(Inner::Varchar)) - } - Inner::Int8Array => { - &Kind::Array(Type(Inner::Int8)) - } - Inner::PointArray => { - &Kind::Array(Type(Inner::Point)) 
- } - Inner::LsegArray => { - &Kind::Array(Type(Inner::Lseg)) - } - Inner::PathArray => { - &Kind::Array(Type(Inner::Path)) - } - Inner::BoxArray => { - &Kind::Array(Type(Inner::Box)) - } - Inner::Float4Array => { - &Kind::Array(Type(Inner::Float4)) - } - Inner::Float8Array => { - &Kind::Array(Type(Inner::Float8)) - } - Inner::PolygonArray => { - &Kind::Array(Type(Inner::Polygon)) - } - Inner::OidArray => { - &Kind::Array(Type(Inner::Oid)) - } - Inner::Aclitem => { - &Kind::Simple - } - Inner::AclitemArray => { - &Kind::Array(Type(Inner::Aclitem)) - } - Inner::MacaddrArray => { - &Kind::Array(Type(Inner::Macaddr)) - } - Inner::InetArray => { - &Kind::Array(Type(Inner::Inet)) - } - Inner::Bpchar => { - &Kind::Simple - } - Inner::Varchar => { - &Kind::Simple - } - Inner::Date => { - &Kind::Simple - } - Inner::Time => { - &Kind::Simple - } - Inner::Timestamp => { - &Kind::Simple - } - Inner::TimestampArray => { - &Kind::Array(Type(Inner::Timestamp)) - } - Inner::DateArray => { - &Kind::Array(Type(Inner::Date)) - } - Inner::TimeArray => { - &Kind::Array(Type(Inner::Time)) - } - Inner::Timestamptz => { - &Kind::Simple - } - Inner::TimestamptzArray => { - &Kind::Array(Type(Inner::Timestamptz)) - } - Inner::Interval => { - &Kind::Simple - } - Inner::IntervalArray => { - &Kind::Array(Type(Inner::Interval)) - } - Inner::NumericArray => { - &Kind::Array(Type(Inner::Numeric)) - } - Inner::CstringArray => { - &Kind::Array(Type(Inner::Cstring)) - } - Inner::Timetz => { - &Kind::Simple - } - Inner::TimetzArray => { - &Kind::Array(Type(Inner::Timetz)) - } - Inner::Bit => { - &Kind::Simple - } - Inner::BitArray => { - &Kind::Array(Type(Inner::Bit)) - } - Inner::Varbit => { - &Kind::Simple - } - Inner::VarbitArray => { - &Kind::Array(Type(Inner::Varbit)) - } - Inner::Numeric => { - &Kind::Simple - } - Inner::Refcursor => { - &Kind::Simple - } - Inner::RefcursorArray => { - &Kind::Array(Type(Inner::Refcursor)) - } - Inner::Regprocedure => { - &Kind::Simple - } - Inner::Regoper => { - &Kind::Simple - } - Inner::Regoperator => { - &Kind::Simple - } - Inner::Regclass => { - &Kind::Simple - } - Inner::Regtype => { - &Kind::Simple - } - Inner::RegprocedureArray => { - &Kind::Array(Type(Inner::Regprocedure)) - } - Inner::RegoperArray => { - &Kind::Array(Type(Inner::Regoper)) - } - Inner::RegoperatorArray => { - &Kind::Array(Type(Inner::Regoperator)) - } - Inner::RegclassArray => { - &Kind::Array(Type(Inner::Regclass)) - } - Inner::RegtypeArray => { - &Kind::Array(Type(Inner::Regtype)) - } - Inner::Record => { - &Kind::Pseudo - } - Inner::Cstring => { - &Kind::Pseudo - } - Inner::Any => { - &Kind::Pseudo - } - Inner::Anyarray => { - &Kind::Pseudo - } - Inner::Void => { - &Kind::Pseudo - } - Inner::Trigger => { - &Kind::Pseudo - } - Inner::LanguageHandler => { - &Kind::Pseudo - } - Inner::Internal => { - &Kind::Pseudo - } - Inner::Anyelement => { - &Kind::Pseudo - } - Inner::RecordArray => { - &Kind::Pseudo - } - Inner::Anynonarray => { - &Kind::Pseudo - } - Inner::TxidSnapshotArray => { - &Kind::Array(Type(Inner::TxidSnapshot)) - } - Inner::Uuid => { - &Kind::Simple - } - Inner::UuidArray => { - &Kind::Array(Type(Inner::Uuid)) - } - Inner::TxidSnapshot => { - &Kind::Simple - } - Inner::FdwHandler => { - &Kind::Pseudo - } - Inner::PgLsn => { - &Kind::Simple - } - Inner::PgLsnArray => { - &Kind::Array(Type(Inner::PgLsn)) - } - Inner::TsmHandler => { - &Kind::Pseudo - } - Inner::PgNdistinct => { - &Kind::Simple - } - Inner::PgDependencies => { - &Kind::Simple - } - Inner::Anyenum => { - &Kind::Pseudo - } - 
Inner::TsVector => { - &Kind::Simple - } - Inner::Tsquery => { - &Kind::Simple - } - Inner::GtsVector => { - &Kind::Simple - } - Inner::TsVectorArray => { - &Kind::Array(Type(Inner::TsVector)) - } - Inner::GtsVectorArray => { - &Kind::Array(Type(Inner::GtsVector)) - } - Inner::TsqueryArray => { - &Kind::Array(Type(Inner::Tsquery)) - } - Inner::Regconfig => { - &Kind::Simple - } - Inner::RegconfigArray => { - &Kind::Array(Type(Inner::Regconfig)) - } - Inner::Regdictionary => { - &Kind::Simple - } - Inner::RegdictionaryArray => { - &Kind::Array(Type(Inner::Regdictionary)) - } - Inner::Jsonb => { - &Kind::Simple - } - Inner::JsonbArray => { - &Kind::Array(Type(Inner::Jsonb)) - } - Inner::AnyRange => { - &Kind::Pseudo - } - Inner::EventTrigger => { - &Kind::Pseudo - } - Inner::Int4Range => { - &Kind::Range(Type(Inner::Int4)) - } - Inner::Int4RangeArray => { - &Kind::Array(Type(Inner::Int4Range)) - } - Inner::NumRange => { - &Kind::Range(Type(Inner::Numeric)) - } - Inner::NumRangeArray => { - &Kind::Array(Type(Inner::NumRange)) - } - Inner::TsRange => { - &Kind::Range(Type(Inner::Timestamp)) - } - Inner::TsRangeArray => { - &Kind::Array(Type(Inner::TsRange)) - } - Inner::TstzRange => { - &Kind::Range(Type(Inner::Timestamptz)) - } - Inner::TstzRangeArray => { - &Kind::Array(Type(Inner::TstzRange)) - } - Inner::DateRange => { - &Kind::Range(Type(Inner::Date)) - } - Inner::DateRangeArray => { - &Kind::Array(Type(Inner::DateRange)) - } - Inner::Int8Range => { - &Kind::Range(Type(Inner::Int8)) - } - Inner::Int8RangeArray => { - &Kind::Array(Type(Inner::Int8Range)) - } - Inner::Jsonpath => { - &Kind::Simple - } - Inner::JsonpathArray => { - &Kind::Array(Type(Inner::Jsonpath)) - } - Inner::Regnamespace => { - &Kind::Simple - } - Inner::RegnamespaceArray => { - &Kind::Array(Type(Inner::Regnamespace)) - } - Inner::Regrole => { - &Kind::Simple - } - Inner::RegroleArray => { - &Kind::Array(Type(Inner::Regrole)) - } - Inner::Regcollation => { - &Kind::Simple - } - Inner::RegcollationArray => { - &Kind::Array(Type(Inner::Regcollation)) - } - Inner::Int4multiRange => { - &Kind::Range(Type(Inner::Int4)) - } - Inner::NummultiRange => { - &Kind::Range(Type(Inner::Numeric)) - } - Inner::TsmultiRange => { - &Kind::Range(Type(Inner::Timestamp)) - } - Inner::TstzmultiRange => { - &Kind::Range(Type(Inner::Timestamptz)) - } - Inner::DatemultiRange => { - &Kind::Range(Type(Inner::Date)) - } - Inner::Int8multiRange => { - &Kind::Range(Type(Inner::Int8)) - } - Inner::AnymultiRange => { - &Kind::Pseudo - } - Inner::AnycompatiblemultiRange => { - &Kind::Pseudo - } - Inner::PgBrinBloomSummary => { - &Kind::Simple - } - Inner::PgBrinMinmaxMultiSummary => { - &Kind::Simple - } - Inner::PgMcvList => { - &Kind::Simple - } - Inner::PgSnapshot => { - &Kind::Simple - } - Inner::PgSnapshotArray => { - &Kind::Array(Type(Inner::PgSnapshot)) - } - Inner::Xid8 => { - &Kind::Simple - } - Inner::Anycompatible => { - &Kind::Pseudo - } - Inner::Anycompatiblearray => { - &Kind::Pseudo - } - Inner::Anycompatiblenonarray => { - &Kind::Pseudo - } - Inner::AnycompatibleRange => { - &Kind::Pseudo - } - Inner::Int4multiRangeArray => { - &Kind::Array(Type(Inner::Int4multiRange)) - } - Inner::NummultiRangeArray => { - &Kind::Array(Type(Inner::NummultiRange)) - } - Inner::TsmultiRangeArray => { - &Kind::Array(Type(Inner::TsmultiRange)) - } - Inner::TstzmultiRangeArray => { - &Kind::Array(Type(Inner::TstzmultiRange)) - } - Inner::DatemultiRangeArray => { - &Kind::Array(Type(Inner::DatemultiRange)) - } - Inner::Int8multiRangeArray => { - 
&Kind::Array(Type(Inner::Int8multiRange)) - } + Inner::Bool => &Kind::Simple, + Inner::Bytea => &Kind::Simple, + Inner::Char => &Kind::Simple, + Inner::Name => &Kind::Simple, + Inner::Int8 => &Kind::Simple, + Inner::Int2 => &Kind::Simple, + Inner::Int2Vector => &Kind::Array(Type(Inner::Int2)), + Inner::Int4 => &Kind::Simple, + Inner::Regproc => &Kind::Simple, + Inner::Text => &Kind::Simple, + Inner::Oid => &Kind::Simple, + Inner::Tid => &Kind::Simple, + Inner::Xid => &Kind::Simple, + Inner::Cid => &Kind::Simple, + Inner::OidVector => &Kind::Array(Type(Inner::Oid)), + Inner::PgDdlCommand => &Kind::Pseudo, + Inner::Json => &Kind::Simple, + Inner::Xml => &Kind::Simple, + Inner::XmlArray => &Kind::Array(Type(Inner::Xml)), + Inner::PgNodeTree => &Kind::Simple, + Inner::JsonArray => &Kind::Array(Type(Inner::Json)), + Inner::TableAmHandler => &Kind::Pseudo, + Inner::Xid8Array => &Kind::Array(Type(Inner::Xid8)), + Inner::IndexAmHandler => &Kind::Pseudo, + Inner::Point => &Kind::Simple, + Inner::Lseg => &Kind::Simple, + Inner::Path => &Kind::Simple, + Inner::Box => &Kind::Simple, + Inner::Polygon => &Kind::Simple, + Inner::Line => &Kind::Simple, + Inner::LineArray => &Kind::Array(Type(Inner::Line)), + Inner::Cidr => &Kind::Simple, + Inner::CidrArray => &Kind::Array(Type(Inner::Cidr)), + Inner::Float4 => &Kind::Simple, + Inner::Float8 => &Kind::Simple, + Inner::Unknown => &Kind::Simple, + Inner::Circle => &Kind::Simple, + Inner::CircleArray => &Kind::Array(Type(Inner::Circle)), + Inner::Macaddr8 => &Kind::Simple, + Inner::Macaddr8Array => &Kind::Array(Type(Inner::Macaddr8)), + Inner::Money => &Kind::Simple, + Inner::MoneyArray => &Kind::Array(Type(Inner::Money)), + Inner::Macaddr => &Kind::Simple, + Inner::Inet => &Kind::Simple, + Inner::BoolArray => &Kind::Array(Type(Inner::Bool)), + Inner::ByteaArray => &Kind::Array(Type(Inner::Bytea)), + Inner::CharArray => &Kind::Array(Type(Inner::Char)), + Inner::NameArray => &Kind::Array(Type(Inner::Name)), + Inner::Int2Array => &Kind::Array(Type(Inner::Int2)), + Inner::Int2VectorArray => &Kind::Array(Type(Inner::Int2Vector)), + Inner::Int4Array => &Kind::Array(Type(Inner::Int4)), + Inner::RegprocArray => &Kind::Array(Type(Inner::Regproc)), + Inner::TextArray => &Kind::Array(Type(Inner::Text)), + Inner::TidArray => &Kind::Array(Type(Inner::Tid)), + Inner::XidArray => &Kind::Array(Type(Inner::Xid)), + Inner::CidArray => &Kind::Array(Type(Inner::Cid)), + Inner::OidVectorArray => &Kind::Array(Type(Inner::OidVector)), + Inner::BpcharArray => &Kind::Array(Type(Inner::Bpchar)), + Inner::VarcharArray => &Kind::Array(Type(Inner::Varchar)), + Inner::Int8Array => &Kind::Array(Type(Inner::Int8)), + Inner::PointArray => &Kind::Array(Type(Inner::Point)), + Inner::LsegArray => &Kind::Array(Type(Inner::Lseg)), + Inner::PathArray => &Kind::Array(Type(Inner::Path)), + Inner::BoxArray => &Kind::Array(Type(Inner::Box)), + Inner::Float4Array => &Kind::Array(Type(Inner::Float4)), + Inner::Float8Array => &Kind::Array(Type(Inner::Float8)), + Inner::PolygonArray => &Kind::Array(Type(Inner::Polygon)), + Inner::OidArray => &Kind::Array(Type(Inner::Oid)), + Inner::Aclitem => &Kind::Simple, + Inner::AclitemArray => &Kind::Array(Type(Inner::Aclitem)), + Inner::MacaddrArray => &Kind::Array(Type(Inner::Macaddr)), + Inner::InetArray => &Kind::Array(Type(Inner::Inet)), + Inner::Bpchar => &Kind::Simple, + Inner::Varchar => &Kind::Simple, + Inner::Date => &Kind::Simple, + Inner::Time => &Kind::Simple, + Inner::Timestamp => &Kind::Simple, + Inner::TimestampArray => 
&Kind::Array(Type(Inner::Timestamp)), + Inner::DateArray => &Kind::Array(Type(Inner::Date)), + Inner::TimeArray => &Kind::Array(Type(Inner::Time)), + Inner::Timestamptz => &Kind::Simple, + Inner::TimestamptzArray => &Kind::Array(Type(Inner::Timestamptz)), + Inner::Interval => &Kind::Simple, + Inner::IntervalArray => &Kind::Array(Type(Inner::Interval)), + Inner::NumericArray => &Kind::Array(Type(Inner::Numeric)), + Inner::CstringArray => &Kind::Array(Type(Inner::Cstring)), + Inner::Timetz => &Kind::Simple, + Inner::TimetzArray => &Kind::Array(Type(Inner::Timetz)), + Inner::Bit => &Kind::Simple, + Inner::BitArray => &Kind::Array(Type(Inner::Bit)), + Inner::Varbit => &Kind::Simple, + Inner::VarbitArray => &Kind::Array(Type(Inner::Varbit)), + Inner::Numeric => &Kind::Simple, + Inner::Refcursor => &Kind::Simple, + Inner::RefcursorArray => &Kind::Array(Type(Inner::Refcursor)), + Inner::Regprocedure => &Kind::Simple, + Inner::Regoper => &Kind::Simple, + Inner::Regoperator => &Kind::Simple, + Inner::Regclass => &Kind::Simple, + Inner::Regtype => &Kind::Simple, + Inner::RegprocedureArray => &Kind::Array(Type(Inner::Regprocedure)), + Inner::RegoperArray => &Kind::Array(Type(Inner::Regoper)), + Inner::RegoperatorArray => &Kind::Array(Type(Inner::Regoperator)), + Inner::RegclassArray => &Kind::Array(Type(Inner::Regclass)), + Inner::RegtypeArray => &Kind::Array(Type(Inner::Regtype)), + Inner::Record => &Kind::Pseudo, + Inner::Cstring => &Kind::Pseudo, + Inner::Any => &Kind::Pseudo, + Inner::Anyarray => &Kind::Pseudo, + Inner::Void => &Kind::Pseudo, + Inner::Trigger => &Kind::Pseudo, + Inner::LanguageHandler => &Kind::Pseudo, + Inner::Internal => &Kind::Pseudo, + Inner::Anyelement => &Kind::Pseudo, + Inner::RecordArray => &Kind::Pseudo, + Inner::Anynonarray => &Kind::Pseudo, + Inner::TxidSnapshotArray => &Kind::Array(Type(Inner::TxidSnapshot)), + Inner::Uuid => &Kind::Simple, + Inner::UuidArray => &Kind::Array(Type(Inner::Uuid)), + Inner::TxidSnapshot => &Kind::Simple, + Inner::FdwHandler => &Kind::Pseudo, + Inner::PgLsn => &Kind::Simple, + Inner::PgLsnArray => &Kind::Array(Type(Inner::PgLsn)), + Inner::TsmHandler => &Kind::Pseudo, + Inner::PgNdistinct => &Kind::Simple, + Inner::PgDependencies => &Kind::Simple, + Inner::Anyenum => &Kind::Pseudo, + Inner::TsVector => &Kind::Simple, + Inner::Tsquery => &Kind::Simple, + Inner::GtsVector => &Kind::Simple, + Inner::TsVectorArray => &Kind::Array(Type(Inner::TsVector)), + Inner::GtsVectorArray => &Kind::Array(Type(Inner::GtsVector)), + Inner::TsqueryArray => &Kind::Array(Type(Inner::Tsquery)), + Inner::Regconfig => &Kind::Simple, + Inner::RegconfigArray => &Kind::Array(Type(Inner::Regconfig)), + Inner::Regdictionary => &Kind::Simple, + Inner::RegdictionaryArray => &Kind::Array(Type(Inner::Regdictionary)), + Inner::Jsonb => &Kind::Simple, + Inner::JsonbArray => &Kind::Array(Type(Inner::Jsonb)), + Inner::AnyRange => &Kind::Pseudo, + Inner::EventTrigger => &Kind::Pseudo, + Inner::Int4Range => &Kind::Range(Type(Inner::Int4)), + Inner::Int4RangeArray => &Kind::Array(Type(Inner::Int4Range)), + Inner::NumRange => &Kind::Range(Type(Inner::Numeric)), + Inner::NumRangeArray => &Kind::Array(Type(Inner::NumRange)), + Inner::TsRange => &Kind::Range(Type(Inner::Timestamp)), + Inner::TsRangeArray => &Kind::Array(Type(Inner::TsRange)), + Inner::TstzRange => &Kind::Range(Type(Inner::Timestamptz)), + Inner::TstzRangeArray => &Kind::Array(Type(Inner::TstzRange)), + Inner::DateRange => &Kind::Range(Type(Inner::Date)), + Inner::DateRangeArray => 
&Kind::Array(Type(Inner::DateRange)), + Inner::Int8Range => &Kind::Range(Type(Inner::Int8)), + Inner::Int8RangeArray => &Kind::Array(Type(Inner::Int8Range)), + Inner::Jsonpath => &Kind::Simple, + Inner::JsonpathArray => &Kind::Array(Type(Inner::Jsonpath)), + Inner::Regnamespace => &Kind::Simple, + Inner::RegnamespaceArray => &Kind::Array(Type(Inner::Regnamespace)), + Inner::Regrole => &Kind::Simple, + Inner::RegroleArray => &Kind::Array(Type(Inner::Regrole)), + Inner::Regcollation => &Kind::Simple, + Inner::RegcollationArray => &Kind::Array(Type(Inner::Regcollation)), + Inner::Int4multiRange => &Kind::Multirange(Type(Inner::Int4)), + Inner::NummultiRange => &Kind::Multirange(Type(Inner::Numeric)), + Inner::TsmultiRange => &Kind::Multirange(Type(Inner::Timestamp)), + Inner::TstzmultiRange => &Kind::Multirange(Type(Inner::Timestamptz)), + Inner::DatemultiRange => &Kind::Multirange(Type(Inner::Date)), + Inner::Int8multiRange => &Kind::Multirange(Type(Inner::Int8)), + Inner::AnymultiRange => &Kind::Pseudo, + Inner::AnycompatiblemultiRange => &Kind::Pseudo, + Inner::PgBrinBloomSummary => &Kind::Simple, + Inner::PgBrinMinmaxMultiSummary => &Kind::Simple, + Inner::PgMcvList => &Kind::Simple, + Inner::PgSnapshot => &Kind::Simple, + Inner::PgSnapshotArray => &Kind::Array(Type(Inner::PgSnapshot)), + Inner::Xid8 => &Kind::Simple, + Inner::Anycompatible => &Kind::Pseudo, + Inner::Anycompatiblearray => &Kind::Pseudo, + Inner::Anycompatiblenonarray => &Kind::Pseudo, + Inner::AnycompatibleRange => &Kind::Pseudo, + Inner::Int4multiRangeArray => &Kind::Array(Type(Inner::Int4multiRange)), + Inner::NummultiRangeArray => &Kind::Array(Type(Inner::NummultiRange)), + Inner::TsmultiRangeArray => &Kind::Array(Type(Inner::TsmultiRange)), + Inner::TstzmultiRangeArray => &Kind::Array(Type(Inner::TstzmultiRange)), + Inner::DatemultiRangeArray => &Kind::Array(Type(Inner::DatemultiRange)), + Inner::Int8multiRangeArray => &Kind::Array(Type(Inner::Int8multiRange)), Inner::Other(ref u) => &u.kind, } } @@ -1891,4 +1521,4 @@ impl Type { /// INT8MULTIRANGE[] pub const INT8MULTI_RANGE_ARRAY: Type = Type(Inner::Int8multiRangeArray); -} \ No newline at end of file +} diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 6f191fc16..13a1d75f9 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -279,7 +279,7 @@ impl SqlState { Inner::Other(code) => code, } } - + /// 00000 pub const SUCCESSFUL_COMPLETION: SqlState = SqlState(Inner::E00000); @@ -365,7 +365,8 @@ impl SqlState { pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Inner::E0Z000); /// 0Z002 - pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = SqlState(Inner::E0Z002); + pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = + SqlState(Inner::E0Z002); /// 20000 pub const CASE_NOT_FOUND: SqlState = SqlState(Inner::E20000); @@ -623,7 +624,8 @@ impl SqlState { pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25003); /// 25004 - pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25004); + pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = + SqlState(Inner::E25004); /// 25005 pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25005); @@ -1344,7 +1346,7 @@ enum Inner { EXX002, Other(Box), } - + #[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { From 
eaa1c6279393ce8f0967cce7587638bb37135765 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Nov 2022 13:05:23 +0000 Subject: [PATCH 626/819] Update env_logger requirement from 0.9 to 0.10 Updates the requirements on [env_logger](https://github.com/rust-cli/env_logger) to permit the latest version. - [Release notes](https://github.com/rust-cli/env_logger/releases) - [Changelog](https://github.com/rust-cli/env_logger/blob/main/CHANGELOG.md) - [Commits](https://github.com/rust-cli/env_logger/compare/v0.9.0...v0.10.0) --- updated-dependencies: - dependency-name: env_logger dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 760105104..68737f738 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -62,7 +62,7 @@ tokio-util = { version = "0.7", features = ["codec"] } [dev-dependencies] futures-executor = "0.3" criterion = "0.4" -env_logger = "0.9" +env_logger = "0.10" tokio = { version = "1.0", features = [ "macros", "net", From be90d0e848c0be8a6461127b81c7ad966c828ad0 Mon Sep 17 00:00:00 2001 From: xxchan Date: Wed, 7 Dec 2022 12:29:28 +0100 Subject: [PATCH 627/819] derive Debug for SimpleQueryMessage --- postgres-protocol/src/message/backend.rs | 1 + tokio-postgres/src/lib.rs | 1 + tokio-postgres/src/row.rs | 1 + tokio-postgres/src/simple_query.rs | 1 + 4 files changed, 4 insertions(+) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 45e5c4074..3f5374d64 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -524,6 +524,7 @@ impl CopyOutResponseBody { } } +#[derive(Debug)] pub struct DataRowBody { storage: Bytes, len: u16, diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index bd4d7b8ce..a9ecba4f1 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -239,6 +239,7 @@ pub enum AsyncMessage { } /// Message returned by the `SimpleQuery` stream. +#[derive(Debug)] #[non_exhaustive] pub enum SimpleQueryMessage { /// A row of data. diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index e3ed696c1..db179b432 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -196,6 +196,7 @@ impl AsName for SimpleColumn { } /// A row of data returned from the database by a simple query. +#[derive(Debug)] pub struct SimpleQueryRow { columns: Arc<[SimpleColumn]>, body: DataRowBody, diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 19cb10236..7c266e409 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -15,6 +15,7 @@ use std::sync::Arc; use std::task::{Context, Poll}; /// Information about a column of a single query row. +#[derive(Debug)] pub struct SimpleColumn { name: String, } From 748167d5f8a489888209f58492153f52bd9d9e27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 13:07:35 +0000 Subject: [PATCH 628/819] Update base64 requirement from 0.13 to 0.20 Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version. 
- [Release notes](https://github.com/marshallpierce/rust-base64/releases) - [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md) - [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.13.0...v0.20.0) --- updated-dependencies: - dependency-name: base64 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index a4716907b..2a72cc60c 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" [dependencies] -base64 = "0.13" +base64 = "0.20" byteorder = "1.0" bytes = "1.0" fallible-iterator = "0.2" From 3697f6b63c67073925e1db4d5bb74f1a4dc8c3f3 Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Fri, 26 Aug 2022 13:46:04 -0700 Subject: [PATCH 629/819] Add hostaddr support --- tokio-postgres/src/config.rs | 70 +++++++++++++++++++++++++++++++++++ tokio-postgres/src/connect.rs | 23 +++++++++++- 2 files changed, 91 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 5b364ec06..0c62b5030 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -13,6 +13,7 @@ use crate::{Client, Connection, Error}; use std::borrow::Cow; #[cfg(unix)] use std::ffi::OsStr; +use std::ops::Deref; #[cfg(unix)] use std::os::unix::ffi::OsStrExt; #[cfg(unix)] @@ -91,6 +92,17 @@ pub enum Host { /// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting /// with the `connect` method. +/// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, +/// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. +/// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, +/// - or if host specifies an IP address, that value will be used directly. +/// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications +/// with time constraints. However, a host name is required for verify-full SSL certificate verification. +/// Note that `host` is always required regardless of whether `hostaddr` is present. +/// * If `host` is specified without `hostaddr`, a host name lookup occurs; +/// * If both `host` and `hostaddr` are specified, the value for `hostaddr` gives the server network address. +/// The value for `host` is ignored unless the authentication method requires it, +/// in which case it will be used as the host name. /// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be /// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if /// omitted or the empty string. 
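As a quick illustration of the `hostaddr` keyword documented above, a configuration can carry both a host name and a numeric address: the name stays available for TLS certificate verification and host-based authentication, while the address spares the client a DNS lookup when opening the TCP connection. The sketch below is illustrative only; the host name, address, and user are placeholder values, not part of this patch.

```rust
use tokio_postgres::Config;

fn main() {
    // Placeholder coordinates for illustration only.
    // The host is kept for TLS certificate verification; the hostaddr is the
    // address actually dialed, so no DNS lookup is needed at connect time.
    let config = "host=db.example.com hostaddr=172.28.40.9 port=5432 user=postgres"
        .parse::<Config>()
        .expect("valid connection string");

    // One hostaddr per host, as required by the configuration rules above.
    assert_eq!(config.get_hosts().len(), config.get_hostaddrs().len());
}
```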
@@ -122,6 +134,10 @@ pub enum Host { /// ``` /// /// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 hostaddr=127.0.0.1,127.0.0.2,127.0.0.3 user=postgres target_session_attrs=read-write +/// ``` +/// +/// ```not_rust /// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write /// ``` /// @@ -158,6 +174,7 @@ pub struct Config { pub(crate) application_name: Option, pub(crate) ssl_mode: SslMode, pub(crate) host: Vec, + pub(crate) hostaddr: Vec, pub(crate) port: Vec, pub(crate) connect_timeout: Option, pub(crate) keepalives: bool, @@ -188,6 +205,7 @@ impl Config { application_name: None, ssl_mode: SslMode::Prefer, host: vec![], + hostaddr: vec![], port: vec![], connect_timeout: None, keepalives: true, @@ -298,6 +316,11 @@ impl Config { &self.host } + /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. + pub fn get_hostaddrs(&self) -> &[String] { + self.hostaddr.deref() + } + /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. @@ -310,6 +333,15 @@ impl Config { self } + /// Adds a hostaddr to the configuration. + /// + /// Multiple hostaddrs can be specified by calling this method multiple times, and each will be tried in order. + /// There must be either no hostaddrs, or the same number of hostaddrs as hosts. + pub fn hostaddr(&mut self, hostaddr: &str) -> &mut Config { + self.hostaddr.push(hostaddr.to_string()); + self + } + /// Adds a port to the configuration. /// /// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which @@ -455,6 +487,11 @@ impl Config { self.host(host); } } + "hostaddr" => { + for hostaddr in value.split(',') { + self.hostaddr(hostaddr); + } + } "port" => { for port in value.split(',') { let port = if port.is_empty() { @@ -593,6 +630,7 @@ impl fmt::Debug for Config { .field("application_name", &self.application_name) .field("ssl_mode", &self.ssl_mode) .field("host", &self.host) + .field("hostaddr", &self.hostaddr) .field("port", &self.port) .field("connect_timeout", &self.connect_timeout) .field("keepalives", &self.keepalives) @@ -975,3 +1013,35 @@ impl<'a> UrlParser<'a> { .map_err(|e| Error::config_parse(e.into())) } } + +#[cfg(test)] +mod tests { + use crate::{config::Host, Config}; + + #[test] + fn test_simple_parsing() { + let s = "user=pass_user dbname=postgres host=host1,host2 hostaddr=127.0.0.1,127.0.0.2 port=26257"; + let config = s.parse::().unwrap(); + assert_eq!(Some("pass_user"), config.get_user()); + assert_eq!(Some("postgres"), config.get_dbname()); + assert_eq!( + [ + Host::Tcp("host1".to_string()), + Host::Tcp("host2".to_string()) + ], + config.get_hosts(), + ); + + assert_eq!(["127.0.0.1", "127.0.0.2"], config.get_hostaddrs(),); + + assert_eq!(1, 1); + } + + #[test] + fn test_empty_hostaddrs() { + let s = + "user=pass_user dbname=postgres host=host1,host2,host3 hostaddr=127.0.0.1,,127.0.0.2"; + let config = s.parse::().unwrap(); + assert_eq!(["127.0.0.1", "", "127.0.0.2"], config.get_hostaddrs(),); + } +} diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 97a00c812..c36677234 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -23,6 +23,15 @@ where return Err(Error::config("invalid number of ports".into())); } + if !config.hostaddr.is_empty() && config.hostaddr.len() != config.host.len() { + let msg = format!( + "invalid number of hostaddrs ({}). 
Possible values: 0 or number of hosts ({})", + config.hostaddr.len(), + config.host.len(), + ); + return Err(Error::config(msg.into())); + } + let mut error = None; for (i, host) in config.host.iter().enumerate() { let port = config @@ -32,18 +41,28 @@ where .copied() .unwrap_or(5432); + // The value of host is always used as the hostname for TLS validation. let hostname = match host { Host::Tcp(host) => host.as_str(), // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] Host::Unix(_) => "", }; - let tls = tls .make_tls_connect(hostname) .map_err(|e| Error::tls(e.into()))?; - match connect_once(host, port, tls, config).await { + // If both host and hostaddr are specified, the value of hostaddr is used to to establish the TCP connection. + let hostaddr = match host { + Host::Tcp(_hostname) => match config.hostaddr.get(i) { + Some(hostaddr) if hostaddr.is_empty() => Host::Tcp(hostaddr.clone()), + _ => host.clone(), + }, + #[cfg(unix)] + Host::Unix(_v) => host.clone(), + }; + + match connect_once(&hostaddr, port, tls, config).await { Ok((client, connection)) => return Ok((client, connection)), Err(e) => error = Some(e), } From 48874dc5753e33f49508ba986d7f1d7bc74b4a74 Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sat, 27 Aug 2022 11:40:57 -0700 Subject: [PATCH 630/819] IpAddr + try hostaddr first --- tokio-postgres/src/config.rs | 36 ++++++++++-------- tokio-postgres/src/connect.rs | 61 +++++++++++++++++++------------ tokio-postgres/tests/test/main.rs | 52 ++++++++++++++++++++++++++ 3 files changed, 110 insertions(+), 39 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 0c62b5030..34accdbe8 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -13,6 +13,7 @@ use crate::{Client, Connection, Error}; use std::borrow::Cow; #[cfg(unix)] use std::ffi::OsStr; +use std::net::IpAddr; use std::ops::Deref; #[cfg(unix)] use std::os::unix::ffi::OsStrExt; @@ -98,7 +99,9 @@ pub enum Host { /// - or if host specifies an IP address, that value will be used directly. /// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications /// with time constraints. However, a host name is required for verify-full SSL certificate verification. -/// Note that `host` is always required regardless of whether `hostaddr` is present. +/// Specifically: +/// * If `hostaddr` is specified without `host`, the value for `hostaddr` gives the server network address. +/// The connection attempt will fail if the authentication method requires a host name; /// * If `host` is specified without `hostaddr`, a host name lookup occurs; /// * If both `host` and `hostaddr` are specified, the value for `hostaddr` gives the server network address. /// The value for `host` is ignored unless the authentication method requires it, @@ -174,7 +177,7 @@ pub struct Config { pub(crate) application_name: Option, pub(crate) ssl_mode: SslMode, pub(crate) host: Vec, - pub(crate) hostaddr: Vec, + pub(crate) hostaddr: Vec, pub(crate) port: Vec, pub(crate) connect_timeout: Option, pub(crate) keepalives: bool, @@ -317,7 +320,7 @@ impl Config { } /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. - pub fn get_hostaddrs(&self) -> &[String] { + pub fn get_hostaddrs(&self) -> &[IpAddr] { self.hostaddr.deref() } @@ -337,8 +340,8 @@ impl Config { /// /// Multiple hostaddrs can be specified by calling this method multiple times, and each will be tried in order. 
/// There must be either no hostaddrs, or the same number of hostaddrs as hosts. - pub fn hostaddr(&mut self, hostaddr: &str) -> &mut Config { - self.hostaddr.push(hostaddr.to_string()); + pub fn hostaddr(&mut self, hostaddr: IpAddr) -> &mut Config { + self.hostaddr.push(hostaddr); self } @@ -489,7 +492,10 @@ impl Config { } "hostaddr" => { for hostaddr in value.split(',') { - self.hostaddr(hostaddr); + let addr = hostaddr + .parse() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("hostaddr"))))?; + self.hostaddr(addr); } } "port" => { @@ -1016,6 +1022,8 @@ impl<'a> UrlParser<'a> { #[cfg(test)] mod tests { + use std::net::IpAddr; + use crate::{config::Host, Config}; #[test] @@ -1032,16 +1040,14 @@ mod tests { config.get_hosts(), ); - assert_eq!(["127.0.0.1", "127.0.0.2"], config.get_hostaddrs(),); + assert_eq!( + [ + "127.0.0.1".parse::().unwrap(), + "127.0.0.2".parse::().unwrap() + ], + config.get_hostaddrs(), + ); assert_eq!(1, 1); } - - #[test] - fn test_empty_hostaddrs() { - let s = - "user=pass_user dbname=postgres host=host1,host2,host3 hostaddr=127.0.0.1,,127.0.0.2"; - let config = s.parse::().unwrap(); - assert_eq!(["127.0.0.1", "", "127.0.0.2"], config.get_hostaddrs(),); - } } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index c36677234..ee1dc1c76 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -5,8 +5,8 @@ use crate::connect_socket::connect_socket; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; use futures_util::{future, pin_mut, Future, FutureExt, Stream}; -use std::io; use std::task::Poll; +use std::{cmp, io}; pub async fn connect( mut tls: T, @@ -15,25 +15,35 @@ pub async fn connect( where T: MakeTlsConnect, { - if config.host.is_empty() { - return Err(Error::config("host missing".into())); + if config.host.is_empty() && config.hostaddr.is_empty() { + return Err(Error::config("both host and hostaddr are missing".into())); } - if config.port.len() > 1 && config.port.len() != config.host.len() { - return Err(Error::config("invalid number of ports".into())); - } - - if !config.hostaddr.is_empty() && config.hostaddr.len() != config.host.len() { + if !config.host.is_empty() + && !config.hostaddr.is_empty() + && config.host.len() != config.hostaddr.len() + { let msg = format!( - "invalid number of hostaddrs ({}). Possible values: 0 or number of hosts ({})", - config.hostaddr.len(), + "number of hosts ({}) is different from number of hostaddrs ({})", config.host.len(), + config.hostaddr.len(), ); return Err(Error::config(msg.into())); } + // At this point, either one of the following two scenarios could happen: + // (1) either config.host or config.hostaddr must be empty; + // (2) if both config.host and config.hostaddr are NOT empty; their lengths must be equal. + let num_hosts = cmp::max(config.host.len(), config.hostaddr.len()); + + if config.port.len() > 1 && config.port.len() != num_hosts { + return Err(Error::config("invalid number of ports".into())); + } + let mut error = None; - for (i, host) in config.host.iter().enumerate() { + for i in 0..num_hosts { + let host = config.host.get(i); + let hostaddr = config.hostaddr.get(i); let port = config .port .get(i) @@ -42,27 +52,30 @@ where .unwrap_or(5432); // The value of host is always used as the hostname for TLS validation. 
+ // postgres doesn't support TLS over unix sockets, so the choice for Host::Unix variant here doesn't matter let hostname = match host { - Host::Tcp(host) => host.as_str(), - // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter - #[cfg(unix)] - Host::Unix(_) => "", + Some(Host::Tcp(host)) => host.as_str(), + _ => "", }; let tls = tls .make_tls_connect(hostname) .map_err(|e| Error::tls(e.into()))?; - // If both host and hostaddr are specified, the value of hostaddr is used to to establish the TCP connection. - let hostaddr = match host { - Host::Tcp(_hostname) => match config.hostaddr.get(i) { - Some(hostaddr) if hostaddr.is_empty() => Host::Tcp(hostaddr.clone()), - _ => host.clone(), - }, - #[cfg(unix)] - Host::Unix(_v) => host.clone(), + // Try to use the value of hostaddr to establish the TCP connection, + // fallback to host if hostaddr is not present. + let addr = match hostaddr { + Some(ipaddr) => Host::Tcp(ipaddr.to_string()), + None => { + if let Some(host) = host { + host.clone() + } else { + // This is unreachable. + return Err(Error::config("both host and hostaddr are empty".into())); + } + } }; - match connect_once(&hostaddr, port, tls, config).await { + match connect_once(&addr, port, tls, config).await { Ok((client, connection)) => return Ok((client, connection)), Err(e) => error = Some(e), } diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 0ab4a7bab..387c90d7c 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -147,6 +147,58 @@ async fn scram_password_ok() { connect("user=scram_user password=password dbname=postgres").await; } +#[tokio::test] +async fn host_only_ok() { + let _ = tokio_postgres::connect( + "host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_only_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_and_host_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_mismatch() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1,127.0.0.2 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_both_missing() { + let _ = tokio_postgres::connect( + "port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + #[tokio::test] async fn pipelined_prepare() { let client = connect("user=postgres").await; From d97bed635ef3fe21a3d9dbef0945e57ab2baf8ba Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sat, 27 Aug 2022 11:55:11 -0700 Subject: [PATCH 631/819] also update postgres --- postgres/src/config.rs | 33 +++++++++++++++++++++++++++++++++ tokio-postgres/src/config.rs | 1 + 2 files changed, 34 insertions(+) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b541ec846..a754ff91f 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -6,6 +6,7 @@ use crate::connection::Connection; use crate::Client; use log::info; use std::fmt; +use std::net::IpAddr; use std::path::Path; use std::str::FromStr; use std::sync::Arc; @@ -39,6 +40,19 @@ use tokio_postgres::{Error, Socket}; /// 
path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting /// with the `connect` method. +/// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, +/// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. +/// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, +/// - or if host specifies an IP address, that value will be used directly. +/// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications +/// with time constraints. However, a host name is required for verify-full SSL certificate verification. +/// Specifically: +/// * If `hostaddr` is specified without `host`, the value for `hostaddr` gives the server network address. +/// The connection attempt will fail if the authentication method requires a host name; +/// * If `host` is specified without `hostaddr`, a host name lookup occurs; +/// * If both `host` and `hostaddr` are specified, the value for `hostaddr` gives the server network address. +/// The value for `host` is ignored unless the authentication method requires it, +/// in which case it will be used as the host name. /// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be /// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if /// omitted or the empty string. @@ -67,6 +81,10 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 hostaddr=127.0.0.1,127.0.0.2,127.0.0.3 user=postgres target_session_attrs=read-write +/// ``` +/// +/// ```not_rust /// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write /// ``` /// @@ -204,6 +222,7 @@ impl Config { /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. + /// There must be either no hosts, or the same number of hosts as hostaddrs. pub fn host(&mut self, host: &str) -> &mut Config { self.config.host(host); self @@ -214,6 +233,11 @@ impl Config { self.config.get_hosts() } + /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. + pub fn get_hostaddrs(&self) -> &[IpAddr] { + self.config.get_hostaddrs() + } + /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. @@ -226,6 +250,15 @@ impl Config { self } + /// Adds a hostaddr to the configuration. + /// + /// Multiple hostaddrs can be specified by calling this method multiple times, and each will be tried in order. + /// There must be either no hostaddrs, or the same number of hostaddrs as hosts. + pub fn hostaddr(&mut self, hostaddr: IpAddr) -> &mut Config { + self.config.hostaddr(hostaddr); + self + } + /// Adds a port to the configuration. /// /// Multiple ports can be specified by calling this method multiple times. 
There must either be no ports, in which diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 34accdbe8..923da2985 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -302,6 +302,7 @@ impl Config { /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. + /// There must be either no hosts, or the same number of hosts as hostaddrs. pub fn host(&mut self, host: &str) -> &mut Config { #[cfg(unix)] { From 1a9c1d4ff3e25b7bef01f05c3e396b2eec1564d9 Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sat, 27 Aug 2022 11:55:47 -0700 Subject: [PATCH 632/819] fmt --- postgres/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index a754ff91f..921566b66 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -83,7 +83,7 @@ use tokio_postgres::{Error, Socket}; /// ```not_rust /// host=host1,host2,host3 port=1234,,5678 hostaddr=127.0.0.1,127.0.0.2,127.0.0.3 user=postgres target_session_attrs=read-write /// ``` -/// +/// /// ```not_rust /// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write /// ``` @@ -236,7 +236,7 @@ impl Config { /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. pub fn get_hostaddrs(&self) -> &[IpAddr] { self.config.get_hostaddrs() - } + } /// Adds a Unix socket host to the configuration. /// From 58149dacf6f4633a3c2b24cda442623bd2abb08d Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sun, 28 Aug 2022 12:09:53 -0700 Subject: [PATCH 633/819] explicitly handle host being None --- tokio-postgres/src/connect.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index ee1dc1c76..63574516c 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -51,14 +51,17 @@ where .copied() .unwrap_or(5432); - // The value of host is always used as the hostname for TLS validation. - // postgres doesn't support TLS over unix sockets, so the choice for Host::Unix variant here doesn't matter + // The value of host is used as the hostname for TLS validation, + // if it's not present, use the value of hostaddr. 
let hostname = match host { - Some(Host::Tcp(host)) => host.as_str(), - _ => "", + Some(Host::Tcp(host)) => host.clone(), + // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter Some() + #[cfg(unix)] + Some(Host::Unix(_)) => "".to_string(), + None => hostaddr.map_or("".to_string(), |ipaddr| ipaddr.to_string()), }; let tls = tls - .make_tls_connect(hostname) + .make_tls_connect(&hostname) .map_err(|e| Error::tls(e.into()))?; // Try to use the value of hostaddr to establish the TCP connection, From 7a648ad0cb911cb9144c0db441399f3189d28b3b Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Sun, 28 Aug 2022 12:18:36 -0700 Subject: [PATCH 634/819] add negative test --- tokio-postgres/src/config.rs | 6 ++++++ tokio-postgres/src/connect.rs | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 923da2985..e5bed8ddf 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1051,4 +1051,10 @@ mod tests { assert_eq!(1, 1); } + + #[test] + fn test_invalid_hostaddr_parsing() { + let s = "user=pass_user dbname=postgres host=host1 hostaddr=127.0.0 port=26257"; + s.parse::().err().unwrap(); + } } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 63574516c..888f9cf8a 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -55,7 +55,7 @@ where // if it's not present, use the value of hostaddr. let hostname = match host { Some(Host::Tcp(host)) => host.clone(), - // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter Some() + // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] Some(Host::Unix(_)) => "".to_string(), None => hostaddr.map_or("".to_string(), |ipaddr| ipaddr.to_string()), From a70a7c36c74bfeaf1e171dc2572fddd30d182179 Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Tue, 30 Aug 2022 22:10:19 -0700 Subject: [PATCH 635/819] move test to runtime --- tokio-postgres/tests/test/main.rs | 52 ---------------------------- tokio-postgres/tests/test/runtime.rs | 52 ++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 387c90d7c..0ab4a7bab 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -147,58 +147,6 @@ async fn scram_password_ok() { connect("user=scram_user password=password dbname=postgres").await; } -#[tokio::test] -async fn host_only_ok() { - let _ = tokio_postgres::connect( - "host=localhost port=5433 user=pass_user dbname=postgres password=password", - NoTls, - ) - .await - .unwrap(); -} - -#[tokio::test] -async fn hostaddr_only_ok() { - let _ = tokio_postgres::connect( - "hostaddr=127.0.0.1 port=5433 user=pass_user dbname=postgres password=password", - NoTls, - ) - .await - .unwrap(); -} - -#[tokio::test] -async fn hostaddr_and_host_ok() { - let _ = tokio_postgres::connect( - "hostaddr=127.0.0.1 host=localhost port=5433 user=pass_user dbname=postgres password=password", - NoTls, - ) - .await - .unwrap(); -} - -#[tokio::test] -async fn hostaddr_host_mismatch() { - let _ = tokio_postgres::connect( - "hostaddr=127.0.0.1,127.0.0.2 host=localhost port=5433 user=pass_user dbname=postgres password=password", - NoTls, - ) - .await - .err() - .unwrap(); -} - -#[tokio::test] -async fn hostaddr_host_both_missing() { - let _ = tokio_postgres::connect( - "port=5433 user=pass_user dbname=postgres 
password=password", - NoTls, - ) - .await - .err() - .unwrap(); -} - #[tokio::test] async fn pipelined_prepare() { let client = connect("user=postgres").await; diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index 67b4ead8a..86c1f0701 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -66,6 +66,58 @@ async fn target_session_attrs_err() { .unwrap(); } +#[tokio::test] +async fn host_only_ok() { + let _ = tokio_postgres::connect( + "host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_only_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_and_host_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_mismatch() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1,127.0.0.2 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_both_missing() { + let _ = tokio_postgres::connect( + "port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + #[tokio::test] async fn cancel_query() { let client = connect("host=localhost port=5433 user=postgres").await; From d368475b50880b920ffbd73b7d6398b2171db53f Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 30 Dec 2022 13:47:41 +0200 Subject: [PATCH 636/819] Add flush() to front-end messages The PostgreSQL wire protocol has a "Flush" message, which can be used by the clients for long-lived connections. Add a flush() helper for it. 
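For reference, the Flush message produced by the new helper is a fixed five-byte frame: the `H` tag followed by a big-endian length that covers only the length field itself, since the body is empty. A minimal sketch of its use (buffer name arbitrary):

```rust
use bytes::BytesMut;
use postgres_protocol::message::frontend;

fn main() {
    let mut buf = BytesMut::new();
    frontend::flush(&mut buf);
    // 'H' tag plus a 4-byte length; the length includes itself but not the tag.
    assert_eq!(&buf[..], b"H\x00\x00\x00\x04");
}
```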
--- postgres-protocol/src/message/frontend.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index 5d0a8ff8c..600f7da48 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -271,6 +271,12 @@ where }) } +#[inline] +pub fn flush(buf: &mut BytesMut) { + buf.put_u8(b'H'); + write_body(buf, |_| Ok::<(), io::Error>(())).unwrap(); +} + #[inline] pub fn sync(buf: &mut BytesMut) { buf.put_u8(b'S'); From 8ab8f1a5552545986ba660cf2ad45ae2c8e54160 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 30 Dec 2022 08:43:24 -0500 Subject: [PATCH 637/819] Clippy --- postgres-protocol/src/message/backend.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 3f5374d64..e0eacbea0 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -582,7 +582,7 @@ impl<'a> FallibleIterator for DataRowRanges<'a> { )); } let base = self.len - self.buf.len(); - self.buf = &self.buf[len as usize..]; + self.buf = &self.buf[len..]; Ok(Some(Some(base..base + len))) } } From 0c056148d0c484def905b16b539fb845e31541d4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 1 Jan 2023 20:33:06 -0500 Subject: [PATCH 638/819] Clarify poll_message docs Closes #975 --- tokio-postgres/src/connection.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index 30be4e834..414335955 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -302,6 +302,9 @@ where /// /// The server can send notices as well as notifications asynchronously to the client. Applications that wish to /// examine those messages should use this method to drive the connection rather than its `Future` implementation. + /// + /// Return values of `None` or `Some(Err(_))` are "terminal"; callers should not invoke this method again after + /// receiving one of those values. pub fn poll_message( &mut self, cx: &mut Context<'_>, From e38e435665af8dbd55e7d803f019405653f99205 Mon Sep 17 00:00:00 2001 From: Ari Breitkreuz Date: Sun, 8 Jan 2023 12:21:42 +0100 Subject: [PATCH 639/819] add batch_execute to generic client --- tokio-postgres/src/generic_client.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index b2a907558..f05a422cc 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -69,6 +69,10 @@ pub trait GenericClient: private::Sealed { /// Like `Client::transaction`. async fn transaction(&mut self) -> Result, Error>; + /// Like `Client::batch_execute`. + async fn batch_execute(&self, query: &str) -> Result<(), Error>; + + /// Returns a reference to the underlying `Client`. 
fn client(&self) -> &Client; } @@ -149,6 +153,10 @@ impl GenericClient for Client { self.transaction().await } + async fn batch_execute(&self, query: &str) -> Result<(), Error> { + self.batch_execute(query).await + } + fn client(&self) -> &Client { self } @@ -232,6 +240,10 @@ impl GenericClient for Transaction<'_> { self.transaction().await } + async fn batch_execute(&self, query: &str) -> Result<(), Error> { + self.batch_execute(query).await + } + fn client(&self) -> &Client { self.client() } From 383fd50f75a0314cde53b46ec9aafcba870fadbe Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 8 Jan 2023 08:32:00 -0500 Subject: [PATCH 640/819] Update tokio-postgres/src/generic_client.rs --- tokio-postgres/src/generic_client.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index f05a422cc..50cff9712 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -72,7 +72,6 @@ pub trait GenericClient: private::Sealed { /// Like `Client::batch_execute`. async fn batch_execute(&self, query: &str) -> Result<(), Error>; - /// Returns a reference to the underlying `Client`. fn client(&self) -> &Client; } From c5cb28bcaf2f4031a88c044b40750a65391d8be8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jan 2023 13:06:33 +0000 Subject: [PATCH 641/819] Update base64 requirement from 0.20 to 0.21 Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version. - [Release notes](https://github.com/marshallpierce/rust-base64/releases) - [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md) - [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.20.0...v0.21.0) --- updated-dependencies: - dependency-name: base64 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 2a72cc60c..922e92313 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" [dependencies] -base64 = "0.20" +base64 = "0.21" byteorder = "1.0" bytes = "1.0" fallible-iterator = "0.2" From 10bf27128dc4444c650bb371c835a9b87cea4102 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 9 Jan 2023 20:41:16 -0500 Subject: [PATCH 642/819] fix build --- postgres-protocol/src/authentication/sasl.rs | 16 ++++++++++++---- postgres-protocol/src/password/mod.rs | 8 +++++--- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index ea2f55cad..2352a66c4 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -1,5 +1,8 @@ //! SASL-based authentication support. 
+use base64::display::Base64Display; +use base64::engine::general_purpose::STANDARD; +use base64::Engine; use hmac::{Hmac, Mac}; use rand::{self, Rng}; use sha2::digest::FixedOutput; @@ -189,7 +192,7 @@ impl ScramSha256 { return Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid nonce")); } - let salt = match base64::decode(parsed.salt) { + let salt = match STANDARD.decode(parsed.salt) { Ok(salt) => salt, Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; @@ -208,7 +211,7 @@ impl ScramSha256 { let mut cbind_input = vec![]; cbind_input.extend(channel_binding.gs2_header().as_bytes()); cbind_input.extend(channel_binding.cbind_data()); - let cbind_input = base64::encode(&cbind_input); + let cbind_input = STANDARD.encode(&cbind_input); self.message.clear(); write!(&mut self.message, "c={},r={}", cbind_input, parsed.nonce).unwrap(); @@ -225,7 +228,12 @@ impl ScramSha256 { *proof ^= signature; } - write!(&mut self.message, ",p={}", base64::encode(&*client_proof)).unwrap(); + write!( + &mut self.message, + ",p={}", + Base64Display::new(&client_proof, &STANDARD) + ) + .unwrap(); self.state = State::Finish { salted_password, @@ -262,7 +270,7 @@ impl ScramSha256 { ServerFinalMessage::Verifier(verifier) => verifier, }; - let verifier = match base64::decode(verifier) { + let verifier = match STANDARD.decode(verifier) { Ok(verifier) => verifier, Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; diff --git a/postgres-protocol/src/password/mod.rs b/postgres-protocol/src/password/mod.rs index a60687bbe..f03bb811d 100644 --- a/postgres-protocol/src/password/mod.rs +++ b/postgres-protocol/src/password/mod.rs @@ -7,6 +7,8 @@ //! end up in logs pg_stat displays, etc. use crate::authentication::sasl; +use base64::display::Base64Display; +use base64::engine::general_purpose::STANDARD; use hmac::{Hmac, Mac}; use md5::Md5; use rand::RngCore; @@ -80,9 +82,9 @@ pub(crate) fn scram_sha_256_salt(password: &[u8], salt: [u8; SCRAM_DEFAULT_SALT_ format!( "SCRAM-SHA-256${}:{}${}:{}", SCRAM_DEFAULT_ITERATIONS, - base64::encode(salt), - base64::encode(stored_key), - base64::encode(server_key) + Base64Display::new(&salt, &STANDARD), + Base64Display::new(&stored_key, &STANDARD), + Base64Display::new(&server_key, &STANDARD) ) } From cf77dc06e69a08327f2656fd8e895038d66da12e Mon Sep 17 00:00:00 2001 From: Tom Parker-Shemilt Date: Tue, 10 Jan 2023 11:47:53 +0000 Subject: [PATCH 643/819] futures-util <0.3.14 doesn't have Stream --- postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index bd7c297f3..e3a8663f2 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -38,7 +38,7 @@ with-time-0_3 = ["tokio-postgres/with-time-0_3"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -futures-util = { version = "0.3", features = ["sink"] } +futures-util = { version = "0.3.14", features = ["sink"] } tokio-postgres = { version = "0.7.7", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } From 205af89aafb8fec5e8a75599e0401d5797932f13 Mon Sep 17 00:00:00 2001 From: Alex Chi Date: Sun, 15 Jan 2023 21:16:56 -0500 Subject: [PATCH 644/819] feat: add `rows_affected` to RowStream Signed-off-by: Alex Chi --- tokio-postgres/src/copy_in.rs | 10 ++----- tokio-postgres/src/query.rs | 43 +++++++++++++++++++++--------- tokio-postgres/src/simple_query.rs | 10 ++----- 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/tokio-postgres/src/copy_in.rs 
b/tokio-postgres/src/copy_in.rs index de1da933b..59e31fea6 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -1,6 +1,7 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; +use crate::query::extract_row_affected; use crate::{query, slice_iter, Error, Statement}; use bytes::{Buf, BufMut, BytesMut}; use futures_channel::mpsc; @@ -110,14 +111,7 @@ where let this = self.as_mut().project(); match ready!(this.responses.poll_next(cx))? { Message::CommandComplete(body) => { - let rows = body - .tag() - .map_err(Error::parse)? - .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); + let rows = extract_row_affected(&body)?; return Poll::Ready(Ok(rows)); } _ => return Poll::Ready(Err(Error::unexpected_message())), diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 71db8769a..72e92ccda 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -7,7 +7,7 @@ use bytes::{Bytes, BytesMut}; use futures_util::{ready, Stream}; use log::{debug, log_enabled, Level}; use pin_project_lite::pin_project; -use postgres_protocol::message::backend::Message; +use postgres_protocol::message::backend::{CommandCompleteBody, Message}; use postgres_protocol::message::frontend; use std::fmt; use std::marker::PhantomPinned; @@ -52,6 +52,7 @@ where Ok(RowStream { statement, responses, + rows_affected: None, _p: PhantomPinned, }) } @@ -72,10 +73,24 @@ pub async fn query_portal( Ok(RowStream { statement: portal.statement().clone(), responses, + rows_affected: None, _p: PhantomPinned, }) } +/// Extract the number of rows affected from [`CommandCompleteBody`]. +pub fn extract_row_affected(body: &CommandCompleteBody) -> Result { + let rows = body + .tag() + .map_err(Error::parse)? + .rsplit(' ') + .next() + .unwrap() + .parse() + .unwrap_or(0); + Ok(rows) +} + pub async fn execute( client: &InnerClient, statement: Statement, @@ -104,14 +119,7 @@ where match responses.next().await? { Message::DataRow(_) => {} Message::CommandComplete(body) => { - rows = body - .tag() - .map_err(Error::parse)? - .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); + rows = extract_row_affected(&body)?; } Message::EmptyQueryResponse => rows = 0, Message::ReadyForQuery(_) => return Ok(rows), @@ -202,6 +210,7 @@ pin_project! { pub struct RowStream { statement: Statement, responses: Responses, + rows_affected: Option, #[pin] _p: PhantomPinned, } @@ -217,12 +226,22 @@ impl Stream for RowStream { Message::DataRow(body) => { return Poll::Ready(Some(Ok(Row::new(this.statement.clone(), body)?))) } - Message::EmptyQueryResponse - | Message::CommandComplete(_) - | Message::PortalSuspended => {} + Message::CommandComplete(body) => { + *this.rows_affected = Some(extract_row_affected(&body)?); + } + Message::EmptyQueryResponse | Message::PortalSuspended => {} Message::ReadyForQuery(_) => return Poll::Ready(None), _ => return Poll::Ready(Some(Err(Error::unexpected_message()))), } } } } + +impl RowStream { + /// Returns the number of rows affected by the query. + /// + /// This will be `None` if the information is not available yet. 
+ pub fn rows_affected(&self) -> Option { + self.rows_affected + } +} diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 7c266e409..bcc6d928b 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -1,6 +1,7 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; +use crate::query::extract_row_affected; use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use bytes::Bytes; use fallible_iterator::FallibleIterator; @@ -87,14 +88,7 @@ impl Stream for SimpleQueryStream { loop { match ready!(this.responses.poll_next(cx)?) { Message::CommandComplete(body) => { - let rows = body - .tag() - .map_err(Error::parse)? - .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); + let rows = extract_row_affected(&body)?; return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows)))); } Message::EmptyQueryResponse => { From b1842390c3946947ed43bce998d3480b699786ec Mon Sep 17 00:00:00 2001 From: Alex Chi Date: Mon, 16 Jan 2023 22:54:20 -0500 Subject: [PATCH 645/819] update comments Signed-off-by: Alex Chi --- tokio-postgres/src/query.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 72e92ccda..12176353b 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -240,7 +240,7 @@ impl Stream for RowStream { impl RowStream { /// Returns the number of rows affected by the query. /// - /// This will be `None` if the information is not available yet. + /// This function will return `None` until the stream has been exhausted. pub fn rows_affected(&self) -> Option { self.rows_affected } From eb086d30a30bb402daddd29cbd53bfcfc9c00a7f Mon Sep 17 00:00:00 2001 From: fakeshadow <24548779@qq.com> Date: Fri, 24 Feb 2023 09:56:05 +0800 Subject: [PATCH 646/819] export Bytes type through DataRowBody::storage_bytes method. 
--- postgres-protocol/src/message/backend.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index e0eacbea0..2b245101e 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -544,6 +544,11 @@ impl DataRowBody { pub fn buffer(&self) -> &[u8] { &self.storage } + + #[inline] + pub fn storage_bytes(&self) -> &Bytes { + &self.storage + } } pub struct DataRowRanges<'a> { From 842eacefc82e1e2d8ef69d60eb14c5e421c5298b Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 25 Feb 2023 10:10:04 -0500 Subject: [PATCH 647/819] fix ci --- .github/workflows/ci.yml | 2 +- postgres-derive-test/src/lib.rs | 4 ++-- postgres-protocol/src/authentication/sasl.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8d17f4d6b..92afbb0c8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.62.0 + version: 1.63.0 - run: echo "::set-output name=version::$(rustc --version)" id: rust-version - uses: actions/cache@v1 diff --git a/postgres-derive-test/src/lib.rs b/postgres-derive-test/src/lib.rs index d1478ac4c..f0534f32c 100644 --- a/postgres-derive-test/src/lib.rs +++ b/postgres-derive-test/src/lib.rs @@ -14,7 +14,7 @@ where T: PartialEq + FromSqlOwned + ToSql + Sync, S: fmt::Display, { - for &(ref val, ref repr) in checks.iter() { + for (val, repr) in checks.iter() { let stmt = conn .prepare(&format!("SELECT {}::{}", *repr, sql_type)) .unwrap(); @@ -38,7 +38,7 @@ pub fn test_type_asymmetric( S: fmt::Display, C: Fn(&T, &F) -> bool, { - for &(ref val, ref repr) in checks.iter() { + for (val, repr) in checks.iter() { let stmt = conn .prepare(&format!("SELECT {}::{}", *repr, sql_type)) .unwrap(); diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index 2352a66c4..4a77507e9 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -359,7 +359,7 @@ impl<'a> Parser<'a> { } fn posit_number(&mut self) -> io::Result { - let n = self.take_while(|c| matches!(c, '0'..='9'))?; + let n = self.take_while(|c| c.is_ascii_digit())?; n.parse() .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e)) } From 63c09eb083ac3a2c3e39c798cda49d35938fefc4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 25 Feb 2023 10:18:38 -0500 Subject: [PATCH 648/819] fix workflow deprecations --- .github/workflows/ci.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 92afbb0c8..f81bdee46 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ jobs: name: rustfmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: sfackler/actions/rustup@master - uses: sfackler/actions/rustfmt@master @@ -25,23 +25,23 @@ jobs: name: clippy runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: sfackler/actions/rustup@master - - run: echo "::set-output name=version::$(rustc --version)" + - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - - uses: actions/cache@v1 + - uses: actions/cache@v3 with: path: ~/.cargo/registry/index key: index-${{ runner.os }}-${{ github.run_number }} 
restore-keys: | index-${{ runner.os }}- - run: cargo generate-lockfile - - uses: actions/cache@v1 + - uses: actions/cache@v3 with: path: ~/.cargo/registry/cache key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} - run: cargo fetch - - uses: actions/cache@v1 + - uses: actions/cache@v3 with: path: target key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y @@ -51,26 +51,26 @@ jobs: name: test runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - run: docker compose up -d - uses: sfackler/actions/rustup@master with: version: 1.63.0 - - run: echo "::set-output name=version::$(rustc --version)" + - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - - uses: actions/cache@v1 + - uses: actions/cache@v3 with: path: ~/.cargo/registry/index key: index-${{ runner.os }}-${{ github.run_number }} restore-keys: | index-${{ runner.os }}- - run: cargo generate-lockfile - - uses: actions/cache@v1 + - uses: actions/cache@v3 with: path: ~/.cargo/registry/cache key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} - run: cargo fetch - - uses: actions/cache@v1 + - uses: actions/cache@v3 with: path: target key: test-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y From 7c7c80dcaab7051bce4f31c36abe78c139f08050 Mon Sep 17 00:00:00 2001 From: Benjamin Swart Date: Thu, 23 Feb 2023 18:32:15 +0100 Subject: [PATCH 649/819] Make incorrect number of paramaters an error --- postgres/src/client.rs | 20 -------------------- tokio-postgres/src/client.rs | 32 -------------------------------- tokio-postgres/src/error/mod.rs | 8 ++++++++ tokio-postgres/src/query.rs | 9 +++------ 4 files changed, 11 insertions(+), 58 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index 29cac840d..c8e14cf81 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -57,10 +57,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Example /// /// ```no_run @@ -96,10 +92,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Examples /// /// ```no_run @@ -134,10 +126,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Examples /// /// ```no_run @@ -172,10 +160,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Examples /// /// ```no_run @@ -213,10 +197,6 @@ impl Client { /// It takes an iterator of parameters rather than a slice, and returns an iterator of rows rather than collecting /// them into an array. 
/// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Examples /// /// ```no_run diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index ad5aa2866..5d0d2c536 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -230,10 +230,6 @@ impl Client { /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. pub async fn query( &self, statement: &T, @@ -258,10 +254,6 @@ impl Client { /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. pub async fn query_one( &self, statement: &T, @@ -295,10 +287,6 @@ impl Client { /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. pub async fn query_opt( &self, statement: &T, @@ -331,10 +319,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// [`query`]: #method.query /// /// # Examples @@ -382,10 +366,6 @@ impl Client { /// with the `prepare` method. /// /// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. pub async fn execute( &self, statement: &T, @@ -406,10 +386,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// [`execute`]: #method.execute pub async fn execute_raw(&self, statement: &T, params: I) -> Result where @@ -426,10 +402,6 @@ impl Client { /// /// PostgreSQL does not support parameters in `COPY` statements, so this method does not take any. The copy *must* /// be explicitly completed via the `Sink::close` or `finish` methods. If it is not, the copy will be aborted. - /// - /// # Panics - /// - /// Panics if the statement contains parameters. pub async fn copy_in(&self, statement: &T) -> Result, Error> where T: ?Sized + ToStatement, @@ -442,10 +414,6 @@ impl Client { /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. /// /// PostgreSQL does not support parameters in `COPY` statements, so this method does not take any. - /// - /// # Panics - /// - /// Panics if the statement contains parameters. 
pub async fn copy_out(&self, statement: &T) -> Result where T: ?Sized + ToStatement, diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 47a31e793..f1e2644c6 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -344,6 +344,7 @@ enum Kind { ToSql(usize), FromSql(usize), Column(String), + Parameters(usize, usize), Closed, Db, Parse, @@ -383,6 +384,9 @@ impl fmt::Display for Error { Kind::ToSql(idx) => write!(fmt, "error serializing parameter {}", idx)?, Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, Kind::Column(column) => write!(fmt, "invalid column `{}`", column)?, + Kind::Parameters(real, expected) => { + write!(fmt, "expected {expected} parameters but got {real}")? + } Kind::Closed => fmt.write_str("connection closed")?, Kind::Db => fmt.write_str("db error")?, Kind::Parse => fmt.write_str("error parsing response from server")?, @@ -474,6 +478,10 @@ impl Error { Error::new(Kind::Column(column), None) } + pub(crate) fn parameters(real: usize, expected: usize) -> Error { + Error::new(Kind::Parameters(real, expected), None) + } + pub(crate) fn tls(e: Box) -> Error { Error::new(Kind::Tls, Some(e)) } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 12176353b..e6e1d00a8 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -167,12 +167,9 @@ where let param_types = statement.params(); let params = params.into_iter(); - assert!( - param_types.len() == params.len(), - "expected {} parameters but got {}", - param_types.len(), - params.len() - ); + if param_types.len() != params.len() { + return Err(Error::parameters(params.len(), param_types.len())); + } let (param_formats, params): (Vec<_>, Vec<_>) = params .zip(param_types.iter()) From 7cd7b187a5cb990ceb0ea9531cd3345b1e2799c3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 26 Feb 2023 16:44:25 -0500 Subject: [PATCH 650/819] Rename accessor --- postgres-protocol/src/message/backend.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 2b245101e..1b5be1098 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -546,7 +546,7 @@ impl DataRowBody { } #[inline] - pub fn storage_bytes(&self) -> &Bytes { + pub fn buffer_bytes(&self) -> &Bytes { &self.storage } } From 1e1f90786d5df3ac9701b667f9a57bf783357455 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 14:06:03 +0000 Subject: [PATCH 651/819] Update syn requirement from 1.0 to 2.0 Updates the requirements on [syn](https://github.com/dtolnay/syn) to permit the latest version. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/1.0.0...2.0.3) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- postgres-derive/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 22b50b707..8470bc8a9 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -12,6 +12,6 @@ proc-macro = true test = false [dependencies] -syn = "1.0" +syn = "2.0" proc-macro2 = "1.0" quote = "1.0" From e9b5a04a4f1f13d8ecb83211132618b5de6af748 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 20 Mar 2023 20:01:56 -0400 Subject: [PATCH 652/819] Fix build --- postgres-derive/src/fromsql.rs | 12 ++++++------ postgres-derive/src/overrides.rs | 21 ++++++++++++--------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index f458c6e3d..bb87ded5f 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -3,10 +3,10 @@ use quote::{format_ident, quote}; use std::iter; use syn::{ punctuated::Punctuated, token, AngleBracketedGenericArguments, Data, DataStruct, DeriveInput, - Error, Fields, GenericArgument, GenericParam, Generics, Ident, Lifetime, LifetimeDef, - PathArguments, PathSegment, + Error, Fields, GenericArgument, GenericParam, Generics, Ident, Lifetime, PathArguments, + PathSegment, }; -use syn::{TraitBound, TraitBoundModifier, TypeParamBound}; +use syn::{LifetimeParam, TraitBound, TraitBoundModifier, TypeParamBound}; use crate::accepts; use crate::composites::Field; @@ -96,9 +96,9 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { let (impl_generics, _, _) = generics.split_for_impl(); let (_, ty_generics, where_clause) = input.generics.split_for_impl(); let out = quote! { - impl#impl_generics postgres_types::FromSql<#lifetime> for #ident#ty_generics #where_clause { + impl #impl_generics postgres_types::FromSql<#lifetime> for #ident #ty_generics #where_clause { fn from_sql(_type: &postgres_types::Type, buf: &#lifetime [u8]) - -> std::result::Result<#ident#ty_generics, + -> std::result::Result<#ident #ty_generics, std::boxed::Box> { @@ -217,7 +217,7 @@ fn build_generics(source: &Generics) -> (Generics, Lifetime) { let mut out = append_generic_bound(source.to_owned(), &new_fromsql_bound(&lifetime)); out.params.insert( 0, - GenericParam::Lifetime(LifetimeDef::new(lifetime.to_owned())), + GenericParam::Lifetime(LifetimeParam::new(lifetime.to_owned())), ); (out, lifetime) diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs index c00d5a94b..ddb37688b 100644 --- a/postgres-derive/src/overrides.rs +++ b/postgres-derive/src/overrides.rs @@ -1,4 +1,5 @@ -use syn::{Attribute, Error, Lit, Meta, NestedMeta}; +use syn::punctuated::Punctuated; +use syn::{Attribute, Error, Expr, ExprLit, Lit, Meta, Token}; pub struct Overrides { pub name: Option, @@ -13,26 +14,28 @@ impl Overrides { }; for attr in attrs { - let attr = attr.parse_meta()?; - if !attr.path().is_ident("postgres") { continue; } - let list = match attr { + let list = match &attr.meta { Meta::List(ref list) => list, bad => return Err(Error::new_spanned(bad, "expected a #[postgres(...)]")), }; - for item in &list.nested { + let nested = list.parse_args_with(Punctuated::::parse_terminated)?; + + for item in nested { match item { - NestedMeta::Meta(Meta::NameValue(meta)) => { + Meta::NameValue(meta) => { if !meta.path.is_ident("name") { return Err(Error::new_spanned(&meta.path, "unknown override")); } - let value = match &meta.lit { - Lit::Str(s) => s.value(), + let value = match &meta.value { 
+ Expr::Lit(ExprLit { + lit: Lit::Str(lit), .. + }) => lit.value(), bad => { return Err(Error::new_spanned(bad, "expected a string literal")) } @@ -40,7 +43,7 @@ impl Overrides { overrides.name = Some(value); } - NestedMeta::Meta(Meta::Path(ref path)) => { + Meta::Path(path) => { if !path.is_ident("transparent") { return Err(Error::new_spanned(path, "unknown override")); } From b4c05f4f81152f430bb196a5b1956e1e36a6fcff Mon Sep 17 00:00:00 2001 From: Paul Hemberger Date: Fri, 24 Mar 2023 11:46:20 -0400 Subject: [PATCH 653/819] wire through knob for TCP user timeout --- tokio-postgres/Cargo.toml | 2 +- tokio-postgres/src/cancel_query.rs | 1 + tokio-postgres/src/client.rs | 1 + tokio-postgres/src/config.rs | 14 ++++++++++++++ tokio-postgres/src/connect.rs | 2 ++ tokio-postgres/src/connect_socket.rs | 12 +++++++++++- 6 files changed, 30 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 68737f738..807698f88 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -55,7 +55,7 @@ pin-project-lite = "0.2" phf = "0.11" postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } postgres-types = { version = "0.2.4", path = "../postgres-types" } -socket2 = "0.4" +socket2 = { version = "0.4", features = ["all"] } tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index b02729f85..022278804 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -38,6 +38,7 @@ where &config.host, config.port, config.connect_timeout, + config.user_timeout, config.keepalive.as_ref(), ) .await?; diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 5d0d2c536..9760ee55b 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -156,6 +156,7 @@ pub(crate) struct SocketConfig { pub host: Host, pub port: u16, pub connect_timeout: Option, + pub user_timeout: Option, pub keepalive: Option, } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 5b364ec06..5930fbd9e 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -160,6 +160,7 @@ pub struct Config { pub(crate) host: Vec, pub(crate) port: Vec, pub(crate) connect_timeout: Option, + pub(crate) user_timeout: Option, pub(crate) keepalives: bool, pub(crate) keepalive_config: KeepaliveConfig, pub(crate) target_session_attrs: TargetSessionAttrs, @@ -190,6 +191,7 @@ impl Config { host: vec![], port: vec![], connect_timeout: None, + user_timeout: None, keepalives: true, keepalive_config, target_session_attrs: TargetSessionAttrs::Any, @@ -340,6 +342,18 @@ impl Config { self.connect_timeout.as_ref() } + /// Sets the TCP user timeout. + pub fn user_timeout(&mut self, user_timeout: Duration) -> &mut Config { + self.user_timeout = Some(user_timeout); + self + } + + /// Gets the TCP user timeout, if one has been set with the + /// `user_timeout` method. + pub fn get_user_timeout(&self) -> Option<&Duration> { + self.user_timeout.as_ref() + } + /// Controls the use of TCP keepalive. /// /// This is ignored for Unix domain socket connections. Defaults to `true`. 
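A hedged sketch of how the new knob is meant to be used from the config builder. The connection values below are placeholders, and the setter carries the name added in this patch (a follow-up patch in the series renames it to `tcp_user_timeout`).

```rust
use std::time::Duration;
use tokio_postgres::{Config, NoTls};

// Sketch only: host, port, and user are hypothetical.
async fn connect_with_user_timeout() -> Result<(), tokio_postgres::Error> {
    let mut config = Config::new();
    config
        .host("localhost")
        .port(5433)
        .user("postgres")
        .connect_timeout(Duration::from_secs(5))
        // Close the connection if transmitted data stays unacknowledged for
        // 30 seconds (only effective where TCP_USER_TIMEOUT is supported;
        // ignored for Unix domain sockets).
        .user_timeout(Duration::from_secs(30));

    let (_client, connection) = config.connect(NoTls).await?;
    // The connection future must be driven for the client to make progress.
    tokio::spawn(connection);
    Ok(())
}
```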
diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 97a00c812..0006be154 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -65,6 +65,7 @@ where host, port, config.connect_timeout, + config.user_timeout, if config.keepalives { Some(&config.keepalive_config) } else { @@ -118,6 +119,7 @@ where host: host.clone(), port, connect_timeout: config.connect_timeout, + user_timeout: config.user_timeout, keepalive: if config.keepalives { Some(config.keepalive_config.clone()) } else { diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 19d01d87a..3380ccae9 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -14,6 +14,7 @@ pub(crate) async fn connect_socket( host: &Host, port: u16, connect_timeout: Option, + user_timeout: Option, keepalive_config: Option<&KeepaliveConfig>, ) -> Result { match host { @@ -35,8 +36,17 @@ pub(crate) async fn connect_socket( }; stream.set_nodelay(true).map_err(Error::connect)?; + + let sock_ref = SockRef::from(&stream); + #[cfg(target_os = "linux")] + { + sock_ref + .set_tcp_user_timeout(user_timeout) + .map_err(Error::timeout)?; + } + if let Some(keepalive_config) = keepalive_config { - SockRef::from(&stream) + sock_ref .set_tcp_keepalive(&TcpKeepalive::from(keepalive_config)) .map_err(Error::connect)?; } From ee5a139028f9f470f4adae61d80e0404fe310ab2 Mon Sep 17 00:00:00 2001 From: Paul Hemberger Date: Fri, 24 Mar 2023 13:11:18 -0400 Subject: [PATCH 654/819] add in to param parsing; update doc --- tokio-postgres/src/cancel_query.rs | 2 +- tokio-postgres/src/client.rs | 2 +- tokio-postgres/src/config.rs | 28 ++++++++++++++++++++++------ tokio-postgres/src/connect.rs | 4 ++-- tokio-postgres/src/connect_socket.rs | 4 ++-- 5 files changed, 28 insertions(+), 12 deletions(-) diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index 022278804..d869b5824 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -38,7 +38,7 @@ where &config.host, config.port, config.connect_timeout, - config.user_timeout, + config.tcp_user_timeout, config.keepalive.as_ref(), ) .await?; diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 9760ee55b..8b7df4e87 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -156,7 +156,7 @@ pub(crate) struct SocketConfig { pub host: Host, pub port: u16, pub connect_timeout: Option, - pub user_timeout: Option, + pub tcp_user_timeout: Option, pub keepalive: Option, } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 5930fbd9e..fd848153f 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -96,6 +96,9 @@ pub enum Host { /// omitted or the empty string. /// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. Note that hostnames /// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. +/// * `tcp_user_timeout` - The time limit that transmitted data may remain unacknowledged before a connection is forcibly closed. +/// This is ignored for Unix domain socket connections. It is only supported on systems where TCP_USER_TIMEOUT is available +/// and will default to the system default; on other systems, it has no effect. /// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. 
/// This option is ignored when connecting with Unix sockets. Defaults to on. /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. @@ -160,7 +163,7 @@ pub struct Config { pub(crate) host: Vec, pub(crate) port: Vec, pub(crate) connect_timeout: Option, - pub(crate) user_timeout: Option, + pub(crate) tcp_user_timeout: Option, pub(crate) keepalives: bool, pub(crate) keepalive_config: KeepaliveConfig, pub(crate) target_session_attrs: TargetSessionAttrs, @@ -191,7 +194,7 @@ impl Config { host: vec![], port: vec![], connect_timeout: None, - user_timeout: None, + tcp_user_timeout: None, keepalives: true, keepalive_config, target_session_attrs: TargetSessionAttrs::Any, @@ -343,15 +346,19 @@ impl Config { } /// Sets the TCP user timeout. - pub fn user_timeout(&mut self, user_timeout: Duration) -> &mut Config { - self.user_timeout = Some(user_timeout); + /// + /// This is ignored for Unix domain socket connections. It is only supported on systems where + /// TCP_USER_TIMEOUT is available and will default to the system default; on other systems, + /// it has no effect. + pub fn tcp_user_timeout(&mut self, tcp_user_timeout: Duration) -> &mut Config { + self.tcp_user_timeout = Some(tcp_user_timeout); self } /// Gets the TCP user timeout, if one has been set with the /// `user_timeout` method. - pub fn get_user_timeout(&self) -> Option<&Duration> { - self.user_timeout.as_ref() + pub fn get_tcp_user_timeout(&self) -> Option<&Duration> { + self.tcp_user_timeout.as_ref() } /// Controls the use of TCP keepalive. @@ -488,6 +495,14 @@ impl Config { self.connect_timeout(Duration::from_secs(timeout as u64)); } } + "tcp_user_timeout" => { + let timeout = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("tcp_user_timeout"))))?; + if timeout > 0 { + self.tcp_user_timeout(Duration::from_secs(timeout as u64)); + } + } "keepalives" => { let keepalives = value .parse::() @@ -609,6 +624,7 @@ impl fmt::Debug for Config { .field("host", &self.host) .field("port", &self.port) .field("connect_timeout", &self.connect_timeout) + .field("tcp_user_timeout", &self.tcp_user_timeout) .field("keepalives", &self.keepalives) .field("keepalives_idle", &self.keepalive_config.idle) .field("keepalives_interval", &self.keepalive_config.interval) diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 0006be154..ed7ecac66 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -65,7 +65,7 @@ where host, port, config.connect_timeout, - config.user_timeout, + config.tcp_user_timeout, if config.keepalives { Some(&config.keepalive_config) } else { @@ -119,7 +119,7 @@ where host: host.clone(), port, connect_timeout: config.connect_timeout, - user_timeout: config.user_timeout, + tcp_user_timeout: config.tcp_user_timeout, keepalive: if config.keepalives { Some(config.keepalive_config.clone()) } else { diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 3380ccae9..7937df280 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -14,7 +14,7 @@ pub(crate) async fn connect_socket( host: &Host, port: u16, connect_timeout: Option, - user_timeout: Option, + tcp_user_timeout: Option, keepalive_config: Option<&KeepaliveConfig>, ) -> Result { match host { @@ -41,7 +41,7 @@ pub(crate) async fn connect_socket( #[cfg(target_os = "linux")] { sock_ref - .set_tcp_user_timeout(user_timeout) + .set_tcp_user_timeout(tcp_user_timeout) 
.map_err(Error::timeout)?; } From 071dfa3f3b217a32b1e2ab3db9e6ab5132f2fcd1 Mon Sep 17 00:00:00 2001 From: jaydenelliott Date: Sun, 26 Mar 2023 20:33:29 +1100 Subject: [PATCH 655/819] added a rename_all container attribute for enums and structs --- postgres-derive-test/src/composites.rs | 43 +++++++ postgres-derive-test/src/enums.rs | 29 +++++ postgres-derive/src/case.rs | 158 +++++++++++++++++++++++++ postgres-derive/src/composites.rs | 26 ++-- postgres-derive/src/enums.rs | 13 +- postgres-derive/src/fromsql.rs | 9 +- postgres-derive/src/lib.rs | 1 + postgres-derive/src/overrides.rs | 32 ++++- postgres-derive/src/tosql.rs | 9 +- 9 files changed, 299 insertions(+), 21 deletions(-) create mode 100644 postgres-derive/src/case.rs diff --git a/postgres-derive-test/src/composites.rs b/postgres-derive-test/src/composites.rs index a1b76345f..50a22790d 100644 --- a/postgres-derive-test/src/composites.rs +++ b/postgres-derive-test/src/composites.rs @@ -89,6 +89,49 @@ fn name_overrides() { ); } +#[test] +fn rename_all_overrides() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "inventory_item", rename_all = "SCREAMING_SNAKE_CASE")] + struct InventoryItem { + name: String, + supplier_id: i32, + #[postgres(name = "Price")] + price: Option, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + \"NAME\" TEXT, + \"SUPPLIER_ID\" INT, + \"Price\" DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: Some(15.50), + }; + + let item_null = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: None, + }; + + test_type( + &mut conn, + "inventory_item", + &[ + (item, "ROW('foobar', 100, 15.50)"), + (item_null, "ROW('foobar', 100, NULL)"), + ], + ); +} + #[test] fn wrong_name() { #[derive(FromSql, ToSql, Debug, PartialEq)] diff --git a/postgres-derive-test/src/enums.rs b/postgres-derive-test/src/enums.rs index a7039ca05..e44f37616 100644 --- a/postgres-derive-test/src/enums.rs +++ b/postgres-derive-test/src/enums.rs @@ -53,6 +53,35 @@ fn name_overrides() { ); } +#[test] +fn rename_all_overrides() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(name = "mood", rename_all = "snake_case")] + enum Mood { + Sad, + #[postgres(name = "okay")] + Ok, + Happy, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute( + "CREATE TYPE pg_temp.mood AS ENUM ('sad', 'okay', 'happy')", + &[], + ) + .unwrap(); + + test_type( + &mut conn, + "mood", + &[ + (Mood::Sad, "'sad'"), + (Mood::Ok, "'okay'"), + (Mood::Happy, "'happy'"), + ], + ); +} + #[test] fn wrong_name() { #[derive(Debug, ToSql, FromSql, PartialEq)] diff --git a/postgres-derive/src/case.rs b/postgres-derive/src/case.rs new file mode 100644 index 000000000..b128990c5 --- /dev/null +++ b/postgres-derive/src/case.rs @@ -0,0 +1,158 @@ +#[allow(deprecated, unused_imports)] +use std::ascii::AsciiExt; + +use self::RenameRule::*; + +/// The different possible ways to change case of fields in a struct, or variants in an enum. +#[allow(clippy::enum_variant_names)] +#[derive(Copy, Clone, PartialEq)] +pub enum RenameRule { + /// Rename direct children to "lowercase" style. + LowerCase, + /// Rename direct children to "UPPERCASE" style. + UpperCase, + /// Rename direct children to "PascalCase" style, as typically used for + /// enum variants. 
+ PascalCase, + /// Rename direct children to "camelCase" style. + CamelCase, + /// Rename direct children to "snake_case" style, as commonly used for + /// fields. + SnakeCase, + /// Rename direct children to "SCREAMING_SNAKE_CASE" style, as commonly + /// used for constants. + ScreamingSnakeCase, + /// Rename direct children to "kebab-case" style. + KebabCase, + /// Rename direct children to "SCREAMING-KEBAB-CASE" style. + ScreamingKebabCase, +} + +pub static RENAME_RULES: &[(&str, RenameRule)] = &[ + ("lowercase", LowerCase), + ("UPPERCASE", UpperCase), + ("PascalCase", PascalCase), + ("camelCase", CamelCase), + ("snake_case", SnakeCase), + ("SCREAMING_SNAKE_CASE", ScreamingSnakeCase), + ("kebab-case", KebabCase), + ("SCREAMING-KEBAB-CASE", ScreamingKebabCase), +]; + +impl RenameRule { + /// Apply a renaming rule to an enum variant, returning the version expected in the source. + pub fn apply_to_variant(&self, variant: &str) -> String { + match *self { + PascalCase => variant.to_owned(), + LowerCase => variant.to_ascii_lowercase(), + UpperCase => variant.to_ascii_uppercase(), + CamelCase => variant[..1].to_ascii_lowercase() + &variant[1..], + SnakeCase => { + let mut snake = String::new(); + for (i, ch) in variant.char_indices() { + if i > 0 && ch.is_uppercase() { + snake.push('_'); + } + snake.push(ch.to_ascii_lowercase()); + } + snake + } + ScreamingSnakeCase => SnakeCase.apply_to_variant(variant).to_ascii_uppercase(), + KebabCase => SnakeCase.apply_to_variant(variant).replace('_', "-"), + ScreamingKebabCase => ScreamingSnakeCase + .apply_to_variant(variant) + .replace('_', "-"), + } + } + + /// Apply a renaming rule to a struct field, returning the version expected in the source. + pub fn apply_to_field(&self, field: &str) -> String { + match *self { + LowerCase | SnakeCase => field.to_owned(), + UpperCase => field.to_ascii_uppercase(), + PascalCase => { + let mut pascal = String::new(); + let mut capitalize = true; + for ch in field.chars() { + if ch == '_' { + capitalize = true; + } else if capitalize { + pascal.push(ch.to_ascii_uppercase()); + capitalize = false; + } else { + pascal.push(ch); + } + } + pascal + } + CamelCase => { + let pascal = PascalCase.apply_to_field(field); + pascal[..1].to_ascii_lowercase() + &pascal[1..] 
+ } + ScreamingSnakeCase => field.to_ascii_uppercase(), + KebabCase => field.replace('_', "-"), + ScreamingKebabCase => ScreamingSnakeCase.apply_to_field(field).replace('_', "-"), + } + } +} + +#[test] +fn rename_variants() { + for &(original, lower, upper, camel, snake, screaming, kebab, screaming_kebab) in &[ + ( + "Outcome", "outcome", "OUTCOME", "outcome", "outcome", "OUTCOME", "outcome", "OUTCOME", + ), + ( + "VeryTasty", + "verytasty", + "VERYTASTY", + "veryTasty", + "very_tasty", + "VERY_TASTY", + "very-tasty", + "VERY-TASTY", + ), + ("A", "a", "A", "a", "a", "A", "a", "A"), + ("Z42", "z42", "Z42", "z42", "z42", "Z42", "z42", "Z42"), + ] { + assert_eq!(LowerCase.apply_to_variant(original), lower); + assert_eq!(UpperCase.apply_to_variant(original), upper); + assert_eq!(PascalCase.apply_to_variant(original), original); + assert_eq!(CamelCase.apply_to_variant(original), camel); + assert_eq!(SnakeCase.apply_to_variant(original), snake); + assert_eq!(ScreamingSnakeCase.apply_to_variant(original), screaming); + assert_eq!(KebabCase.apply_to_variant(original), kebab); + assert_eq!( + ScreamingKebabCase.apply_to_variant(original), + screaming_kebab + ); + } +} + +#[test] +fn rename_fields() { + for &(original, upper, pascal, camel, screaming, kebab, screaming_kebab) in &[ + ( + "outcome", "OUTCOME", "Outcome", "outcome", "OUTCOME", "outcome", "OUTCOME", + ), + ( + "very_tasty", + "VERY_TASTY", + "VeryTasty", + "veryTasty", + "VERY_TASTY", + "very-tasty", + "VERY-TASTY", + ), + ("a", "A", "A", "a", "A", "a", "A"), + ("z42", "Z42", "Z42", "z42", "Z42", "z42", "Z42"), + ] { + assert_eq!(UpperCase.apply_to_field(original), upper); + assert_eq!(PascalCase.apply_to_field(original), pascal); + assert_eq!(CamelCase.apply_to_field(original), camel); + assert_eq!(SnakeCase.apply_to_field(original), original); + assert_eq!(ScreamingSnakeCase.apply_to_field(original), screaming); + assert_eq!(KebabCase.apply_to_field(original), kebab); + assert_eq!(ScreamingKebabCase.apply_to_field(original), screaming_kebab); + } +} diff --git a/postgres-derive/src/composites.rs b/postgres-derive/src/composites.rs index 15bfabc13..dcff2c581 100644 --- a/postgres-derive/src/composites.rs +++ b/postgres-derive/src/composites.rs @@ -4,7 +4,7 @@ use syn::{ TypeParamBound, }; -use crate::overrides::Overrides; +use crate::{case::RenameRule, overrides::Overrides}; pub struct Field { pub name: String, @@ -13,18 +13,26 @@ pub struct Field { } impl Field { - pub fn parse(raw: &syn::Field) -> Result { + pub fn parse(raw: &syn::Field, rename_all: Option) -> Result { let overrides = Overrides::extract(&raw.attrs)?; - let ident = raw.ident.as_ref().unwrap().clone(); - Ok(Field { - name: overrides.name.unwrap_or_else(|| { + + // field level name override takes precendence over container level rename_all override + let name = match overrides.name { + Some(n) => n, + None => { let name = ident.to_string(); - match name.strip_prefix("r#") { - Some(name) => name.to_string(), - None => name, + let stripped = name.strip_prefix("r#").map(String::from).unwrap_or(name); + + match rename_all { + Some(rule) => rule.apply_to_field(&stripped), + None => stripped, } - }), + } + }; + + Ok(Field { + name, ident, type_: raw.ty.clone(), }) diff --git a/postgres-derive/src/enums.rs b/postgres-derive/src/enums.rs index 3c6bc7113..d99eca1c4 100644 --- a/postgres-derive/src/enums.rs +++ b/postgres-derive/src/enums.rs @@ -1,6 +1,6 @@ use syn::{Error, Fields, Ident}; -use crate::overrides::Overrides; +use crate::{case::RenameRule, overrides::Overrides}; 
pub struct Variant { pub ident: Ident, @@ -8,7 +8,7 @@ pub struct Variant { } impl Variant { - pub fn parse(raw: &syn::Variant) -> Result { + pub fn parse(raw: &syn::Variant, rename_all: Option) -> Result { match raw.fields { Fields::Unit => {} _ => { @@ -18,11 +18,16 @@ impl Variant { )) } } - let overrides = Overrides::extract(&raw.attrs)?; + + // variant level name override takes precendence over container level rename_all override + let name = overrides.name.unwrap_or_else(|| match rename_all { + Some(rule) => rule.apply_to_variant(&raw.ident.to_string()), + None => raw.ident.to_string(), + }); Ok(Variant { ident: raw.ident.clone(), - name: overrides.name.unwrap_or_else(|| raw.ident.to_string()), + name, }) } } diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index bb87ded5f..3736e01e9 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -24,7 +24,10 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { )); } - let name = overrides.name.unwrap_or_else(|| input.ident.to_string()); + let name = overrides + .name + .clone() + .unwrap_or_else(|| input.ident.to_string()); let (accepts_body, to_sql_body) = if overrides.transparent { match input.data { @@ -51,7 +54,7 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { let variants = data .variants .iter() - .map(Variant::parse) + .map(|variant| Variant::parse(variant, overrides.rename_all)) .collect::, _>>()?; ( accepts::enum_body(&name, &variants), @@ -75,7 +78,7 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { let fields = fields .named .iter() - .map(Field::parse) + .map(|field| Field::parse(field, overrides.rename_all)) .collect::, _>>()?; ( accepts::composite_body(&name, "FromSql", &fields), diff --git a/postgres-derive/src/lib.rs b/postgres-derive/src/lib.rs index 98e6add24..b849096c9 100644 --- a/postgres-derive/src/lib.rs +++ b/postgres-derive/src/lib.rs @@ -7,6 +7,7 @@ use proc_macro::TokenStream; use syn::parse_macro_input; mod accepts; +mod case; mod composites; mod enums; mod fromsql; diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs index ddb37688b..3918446a2 100644 --- a/postgres-derive/src/overrides.rs +++ b/postgres-derive/src/overrides.rs @@ -1,8 +1,11 @@ use syn::punctuated::Punctuated; use syn::{Attribute, Error, Expr, ExprLit, Lit, Meta, Token}; +use crate::case::{RenameRule, RENAME_RULES}; + pub struct Overrides { pub name: Option, + pub rename_all: Option, pub transparent: bool, } @@ -10,6 +13,7 @@ impl Overrides { pub fn extract(attrs: &[Attribute]) -> Result { let mut overrides = Overrides { name: None, + rename_all: None, transparent: false, }; @@ -28,7 +32,9 @@ impl Overrides { for item in nested { match item { Meta::NameValue(meta) => { - if !meta.path.is_ident("name") { + let name_override = meta.path.is_ident("name"); + let rename_all_override = meta.path.is_ident("rename_all"); + if !name_override && !rename_all_override { return Err(Error::new_spanned(&meta.path, "unknown override")); } @@ -41,7 +47,29 @@ impl Overrides { } }; - overrides.name = Some(value); + if name_override { + overrides.name = Some(value); + } else if rename_all_override { + let rename_rule = RENAME_RULES + .iter() + .find(|rule| rule.0 == value) + .map(|val| val.1) + .ok_or_else(|| { + Error::new_spanned( + &meta.value, + format!( + "invalid rename_all rule, expected one of: {}", + RENAME_RULES + .iter() + .map(|rule| format!("\"{}\"", rule.0)) + .collect::>() + .join(", ") + ), + ) + })?; + + overrides.rename_all = 
Some(rename_rule); + } } Meta::Path(path) => { if !path.is_ident("transparent") { diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index e51acc7fd..1e91df4f6 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -22,7 +22,10 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { )); } - let name = overrides.name.unwrap_or_else(|| input.ident.to_string()); + let name = overrides + .name + .clone() + .unwrap_or_else(|| input.ident.to_string()); let (accepts_body, to_sql_body) = if overrides.transparent { match input.data { @@ -47,7 +50,7 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { let variants = data .variants .iter() - .map(Variant::parse) + .map(|variant| Variant::parse(variant, overrides.rename_all)) .collect::, _>>()?; ( accepts::enum_body(&name, &variants), @@ -69,7 +72,7 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { let fields = fields .named .iter() - .map(Field::parse) + .map(|field| Field::parse(field, overrides.rename_all)) .collect::, _>>()?; ( accepts::composite_body(&name, "ToSql", &fields), From bc8ad8aee69f14e367de2f42c8d3a61c1d9c144b Mon Sep 17 00:00:00 2001 From: jaydenelliott Date: Mon, 27 Mar 2023 18:22:53 +1100 Subject: [PATCH 656/819] Distinguish between field and container attributes when parsing --- postgres-derive/src/composites.rs | 2 +- postgres-derive/src/enums.rs | 2 +- postgres-derive/src/fromsql.rs | 2 +- postgres-derive/src/overrides.rs | 8 +++++++- postgres-derive/src/tosql.rs | 2 +- 5 files changed, 11 insertions(+), 5 deletions(-) diff --git a/postgres-derive/src/composites.rs b/postgres-derive/src/composites.rs index dcff2c581..b6aad8ab3 100644 --- a/postgres-derive/src/composites.rs +++ b/postgres-derive/src/composites.rs @@ -14,7 +14,7 @@ pub struct Field { impl Field { pub fn parse(raw: &syn::Field, rename_all: Option) -> Result { - let overrides = Overrides::extract(&raw.attrs)?; + let overrides = Overrides::extract(&raw.attrs, false)?; let ident = raw.ident.as_ref().unwrap().clone(); // field level name override takes precendence over container level rename_all override diff --git a/postgres-derive/src/enums.rs b/postgres-derive/src/enums.rs index d99eca1c4..3e4b5045f 100644 --- a/postgres-derive/src/enums.rs +++ b/postgres-derive/src/enums.rs @@ -18,7 +18,7 @@ impl Variant { )) } } - let overrides = Overrides::extract(&raw.attrs)?; + let overrides = Overrides::extract(&raw.attrs, false)?; // variant level name override takes precendence over container level rename_all override let name = overrides.name.unwrap_or_else(|| match rename_all { diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index 3736e01e9..4deb23ed2 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -15,7 +15,7 @@ use crate::enums::Variant; use crate::overrides::Overrides; pub fn expand_derive_fromsql(input: DeriveInput) -> Result { - let overrides = Overrides::extract(&input.attrs)?; + let overrides = Overrides::extract(&input.attrs, true)?; if overrides.name.is_some() && overrides.transparent { return Err(Error::new_spanned( diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs index 3918446a2..7f28375bc 100644 --- a/postgres-derive/src/overrides.rs +++ b/postgres-derive/src/overrides.rs @@ -10,7 +10,7 @@ pub struct Overrides { } impl Overrides { - pub fn extract(attrs: &[Attribute]) -> Result { + pub fn extract(attrs: &[Attribute], container_attr: bool) -> Result { let mut overrides = Overrides { name: None, 
rename_all: None, @@ -34,6 +34,12 @@ impl Overrides { Meta::NameValue(meta) => { let name_override = meta.path.is_ident("name"); let rename_all_override = meta.path.is_ident("rename_all"); + if !container_attr && rename_all_override { + return Err(Error::new_spanned( + &meta.path, + "rename_all is a container attribute", + )); + } if !name_override && !rename_all_override { return Err(Error::new_spanned(&meta.path, "unknown override")); } diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index 1e91df4f6..dbeeb16c3 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -13,7 +13,7 @@ use crate::enums::Variant; use crate::overrides::Overrides; pub fn expand_derive_tosql(input: DeriveInput) -> Result { - let overrides = Overrides::extract(&input.attrs)?; + let overrides = Overrides::extract(&input.attrs, true)?; if overrides.name.is_some() && overrides.transparent { return Err(Error::new_spanned( From d509b3bc52df9cf0d7f1f2ac5ac64b0bfc643160 Mon Sep 17 00:00:00 2001 From: jaydenelliott Date: Mon, 27 Mar 2023 18:45:05 +1100 Subject: [PATCH 657/819] Replaced case conversion with heck --- postgres-derive/Cargo.toml | 1 + postgres-derive/src/case.rs | 138 ++++++++++--------------------- postgres-derive/src/enums.rs | 2 +- postgres-derive/src/overrides.rs | 30 +++---- 4 files changed, 60 insertions(+), 111 deletions(-) diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 8470bc8a9..cfc8829f4 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -15,3 +15,4 @@ test = false syn = "2.0" proc-macro2 = "1.0" quote = "1.0" +heck = "0.4" \ No newline at end of file diff --git a/postgres-derive/src/case.rs b/postgres-derive/src/case.rs index b128990c5..20ecc8eed 100644 --- a/postgres-derive/src/case.rs +++ b/postgres-derive/src/case.rs @@ -1,6 +1,11 @@ #[allow(deprecated, unused_imports)] use std::ascii::AsciiExt; +use heck::{ + ToKebabCase, ToLowerCamelCase, ToShoutyKebabCase, ToShoutySnakeCase, ToSnakeCase, ToTrainCase, + ToUpperCamelCase, +}; + use self::RenameRule::*; /// The different possible ways to change case of fields in a struct, or variants in an enum. @@ -26,78 +31,56 @@ pub enum RenameRule { KebabCase, /// Rename direct children to "SCREAMING-KEBAB-CASE" style. ScreamingKebabCase, + + /// Rename direct children to "Train-Case" style. + TrainCase, } -pub static RENAME_RULES: &[(&str, RenameRule)] = &[ - ("lowercase", LowerCase), - ("UPPERCASE", UpperCase), - ("PascalCase", PascalCase), - ("camelCase", CamelCase), - ("snake_case", SnakeCase), - ("SCREAMING_SNAKE_CASE", ScreamingSnakeCase), - ("kebab-case", KebabCase), - ("SCREAMING-KEBAB-CASE", ScreamingKebabCase), +pub const RENAME_RULES: &[&str] = &[ + "lowercase", + "UPPERCASE", + "PascalCase", + "camelCase", + "snake_case", + "SCREAMING_SNAKE_CASE", + "kebab-case", + "SCREAMING-KEBAB-CASE", + "Train-Case", ]; impl RenameRule { - /// Apply a renaming rule to an enum variant, returning the version expected in the source. 
- pub fn apply_to_variant(&self, variant: &str) -> String { - match *self { - PascalCase => variant.to_owned(), - LowerCase => variant.to_ascii_lowercase(), - UpperCase => variant.to_ascii_uppercase(), - CamelCase => variant[..1].to_ascii_lowercase() + &variant[1..], - SnakeCase => { - let mut snake = String::new(); - for (i, ch) in variant.char_indices() { - if i > 0 && ch.is_uppercase() { - snake.push('_'); - } - snake.push(ch.to_ascii_lowercase()); - } - snake - } - ScreamingSnakeCase => SnakeCase.apply_to_variant(variant).to_ascii_uppercase(), - KebabCase => SnakeCase.apply_to_variant(variant).replace('_', "-"), - ScreamingKebabCase => ScreamingSnakeCase - .apply_to_variant(variant) - .replace('_', "-"), + pub fn from_str(rule: &str) -> Option { + match rule { + "lowercase" => Some(LowerCase), + "UPPERCASE" => Some(UpperCase), + "PascalCase" => Some(PascalCase), + "camelCase" => Some(CamelCase), + "snake_case" => Some(SnakeCase), + "SCREAMING_SNAKE_CASE" => Some(ScreamingSnakeCase), + "kebab-case" => Some(KebabCase), + "SCREAMING-KEBAB-CASE" => Some(ScreamingKebabCase), + "Train-Case" => Some(TrainCase), + _ => None, } } - - /// Apply a renaming rule to a struct field, returning the version expected in the source. - pub fn apply_to_field(&self, field: &str) -> String { + /// Apply a renaming rule to an enum or struct field, returning the version expected in the source. + pub fn apply_to_field(&self, variant: &str) -> String { match *self { - LowerCase | SnakeCase => field.to_owned(), - UpperCase => field.to_ascii_uppercase(), - PascalCase => { - let mut pascal = String::new(); - let mut capitalize = true; - for ch in field.chars() { - if ch == '_' { - capitalize = true; - } else if capitalize { - pascal.push(ch.to_ascii_uppercase()); - capitalize = false; - } else { - pascal.push(ch); - } - } - pascal - } - CamelCase => { - let pascal = PascalCase.apply_to_field(field); - pascal[..1].to_ascii_lowercase() + &pascal[1..] 
- } - ScreamingSnakeCase => field.to_ascii_uppercase(), - KebabCase => field.replace('_', "-"), - ScreamingKebabCase => ScreamingSnakeCase.apply_to_field(field).replace('_', "-"), + LowerCase => variant.to_lowercase(), + UpperCase => variant.to_uppercase(), + PascalCase => variant.to_upper_camel_case(), + CamelCase => variant.to_lower_camel_case(), + SnakeCase => variant.to_snake_case(), + ScreamingSnakeCase => variant.to_shouty_snake_case(), + KebabCase => variant.to_kebab_case(), + ScreamingKebabCase => variant.to_shouty_kebab_case(), + TrainCase => variant.to_train_case(), } } } #[test] -fn rename_variants() { +fn rename_field() { for &(original, lower, upper, camel, snake, screaming, kebab, screaming_kebab) in &[ ( "Outcome", "outcome", "OUTCOME", "outcome", "outcome", "OUTCOME", "outcome", "OUTCOME", @@ -115,42 +98,11 @@ fn rename_variants() { ("A", "a", "A", "a", "a", "A", "a", "A"), ("Z42", "z42", "Z42", "z42", "z42", "Z42", "z42", "Z42"), ] { - assert_eq!(LowerCase.apply_to_variant(original), lower); - assert_eq!(UpperCase.apply_to_variant(original), upper); - assert_eq!(PascalCase.apply_to_variant(original), original); - assert_eq!(CamelCase.apply_to_variant(original), camel); - assert_eq!(SnakeCase.apply_to_variant(original), snake); - assert_eq!(ScreamingSnakeCase.apply_to_variant(original), screaming); - assert_eq!(KebabCase.apply_to_variant(original), kebab); - assert_eq!( - ScreamingKebabCase.apply_to_variant(original), - screaming_kebab - ); - } -} - -#[test] -fn rename_fields() { - for &(original, upper, pascal, camel, screaming, kebab, screaming_kebab) in &[ - ( - "outcome", "OUTCOME", "Outcome", "outcome", "OUTCOME", "outcome", "OUTCOME", - ), - ( - "very_tasty", - "VERY_TASTY", - "VeryTasty", - "veryTasty", - "VERY_TASTY", - "very-tasty", - "VERY-TASTY", - ), - ("a", "A", "A", "a", "A", "a", "A"), - ("z42", "Z42", "Z42", "z42", "Z42", "z42", "Z42"), - ] { + assert_eq!(LowerCase.apply_to_field(original), lower); assert_eq!(UpperCase.apply_to_field(original), upper); - assert_eq!(PascalCase.apply_to_field(original), pascal); + assert_eq!(PascalCase.apply_to_field(original), original); assert_eq!(CamelCase.apply_to_field(original), camel); - assert_eq!(SnakeCase.apply_to_field(original), original); + assert_eq!(SnakeCase.apply_to_field(original), snake); assert_eq!(ScreamingSnakeCase.apply_to_field(original), screaming); assert_eq!(KebabCase.apply_to_field(original), kebab); assert_eq!(ScreamingKebabCase.apply_to_field(original), screaming_kebab); diff --git a/postgres-derive/src/enums.rs b/postgres-derive/src/enums.rs index 3e4b5045f..9a6dfa926 100644 --- a/postgres-derive/src/enums.rs +++ b/postgres-derive/src/enums.rs @@ -22,7 +22,7 @@ impl Variant { // variant level name override takes precendence over container level rename_all override let name = overrides.name.unwrap_or_else(|| match rename_all { - Some(rule) => rule.apply_to_variant(&raw.ident.to_string()), + Some(rule) => rule.apply_to_field(&raw.ident.to_string()), None => raw.ident.to_string(), }); Ok(Variant { diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs index 7f28375bc..99faeebb7 100644 --- a/postgres-derive/src/overrides.rs +++ b/postgres-derive/src/overrides.rs @@ -56,23 +56,19 @@ impl Overrides { if name_override { overrides.name = Some(value); } else if rename_all_override { - let rename_rule = RENAME_RULES - .iter() - .find(|rule| rule.0 == value) - .map(|val| val.1) - .ok_or_else(|| { - Error::new_spanned( - &meta.value, - format!( - "invalid rename_all rule, expected 
one of: {}", - RENAME_RULES - .iter() - .map(|rule| format!("\"{}\"", rule.0)) - .collect::>() - .join(", ") - ), - ) - })?; + let rename_rule = RenameRule::from_str(&value).ok_or_else(|| { + Error::new_spanned( + &meta.value, + format!( + "invalid rename_all rule, expected one of: {}", + RENAME_RULES + .iter() + .map(|rule| format!("\"{}\"", rule)) + .collect::>() + .join(", ") + ), + ) + })?; overrides.rename_all = Some(rename_rule); } From a9967c05ff40de34e1471bf28cd956e756d1f6f9 Mon Sep 17 00:00:00 2001 From: Paul Hemberger Date: Mon, 27 Mar 2023 16:47:48 -0400 Subject: [PATCH 658/819] docs: mention sys default if 0 --- tokio-postgres/src/config.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index fd848153f..a8aa7a9f5 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -98,7 +98,7 @@ pub enum Host { /// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. /// * `tcp_user_timeout` - The time limit that transmitted data may remain unacknowledged before a connection is forcibly closed. /// This is ignored for Unix domain socket connections. It is only supported on systems where TCP_USER_TIMEOUT is available -/// and will default to the system default; on other systems, it has no effect. +/// and will default to the system default if omitted or set to 0; on other systems, it has no effect. /// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. /// This option is ignored when connecting with Unix sockets. Defaults to on. /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. @@ -348,8 +348,8 @@ impl Config { /// Sets the TCP user timeout. /// /// This is ignored for Unix domain socket connections. It is only supported on systems where - /// TCP_USER_TIMEOUT is available and will default to the system default; on other systems, - /// it has no effect. + /// TCP_USER_TIMEOUT is available and will default to the system default if omitted or set to 0; + /// on other systems, it has no effect. pub fn tcp_user_timeout(&mut self, tcp_user_timeout: Duration) -> &mut Config { self.tcp_user_timeout = Some(tcp_user_timeout); self From 62a443222c8f660438251ef17cb2ca088f48e207 Mon Sep 17 00:00:00 2001 From: Paul Hemberger Date: Mon, 27 Mar 2023 16:47:58 -0400 Subject: [PATCH 659/819] use correct error type --- tokio-postgres/src/connect_socket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 7937df280..9b3d31d72 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -42,7 +42,7 @@ pub(crate) async fn connect_socket( { sock_ref .set_tcp_user_timeout(tcp_user_timeout) - .map_err(Error::timeout)?; + .map_err(Error::connect)?; } if let Some(keepalive_config) = keepalive_config { From 58f06610cc9e0437c7779ce7dad234fa7a2241c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Mar 2023 21:08:40 +0000 Subject: [PATCH 660/819] Update socket2 requirement from 0.4 to 0.5 Updates the requirements on [socket2](https://github.com/rust-lang/socket2) to permit the latest version. 
- [Release notes](https://github.com/rust-lang/socket2/releases) - [Changelog](https://github.com/rust-lang/socket2/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/socket2/compare/v0.4.0...v0.5.1) --- updated-dependencies: - dependency-name: socket2 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 807698f88..39e41d85c 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -55,7 +55,7 @@ pin-project-lite = "0.2" phf = "0.11" postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } postgres-types = { version = "0.2.4", path = "../postgres-types" } -socket2 = { version = "0.4", features = ["all"] } +socket2 = { version = "0.5", features = ["all"] } tokio = { version = "1.0", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } From a205d23141bb389693bf27a7e8233b5db072a7f5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 18:04:52 -0400 Subject: [PATCH 661/819] Update Cargo.toml --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 39e41d85c..c29852dec 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -56,7 +56,7 @@ phf = "0.11" postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } postgres-types = { version = "0.2.4", path = "../postgres-types" } socket2 = { version = "0.5", features = ["all"] } -tokio = { version = "1.0", features = ["io-util"] } +tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } [dev-dependencies] From 9c0d2dddc43b6137ae5bb0a540b3442b33565fa4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 18:14:05 -0400 Subject: [PATCH 662/819] Update ci.yml --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f81bdee46..8044b2f47 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.63.0 + version: 1.64.0 - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - uses: actions/cache@v3 From 5be97d9559ff7077eb77486ca2c789907f58ff9c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 18:59:29 -0400 Subject: [PATCH 663/819] Release postgres-types v0.2.5 --- postgres-types/CHANGELOG.md | 6 ++++++ postgres-types/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 0e2167e5f..0f42f3495 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.2.5 - 2023-03-27 + +## Added + +* Added support for multi-range types. 
+ ## v0.2.4 - 2022-08-20 ## Added diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 70f1ed54a..35cdd6e7b 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.2.4" +version = "0.2.5" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From a46796798aff0ab30e21abe9160bf1246b354626 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 19:02:39 -0400 Subject: [PATCH 664/819] Release postgres-protocol v0.6.5 --- postgres-protocol/CHANGELOG.md | 11 +++++++++++ postgres-protocol/Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index d84f29ded..034fd637c 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,16 @@ # Change Log +## v0.6.5 - 2023-03-27 + +### Added + +* Added `message::frontend::flush`. +* Added `DataRowBody::buffer_bytes`. + +### Changed + +* Upgraded `base64`. + ## v0.6.4 - 2022-04-03 ### Added diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 922e92313..e32211369 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.6.4" +version = "0.6.5" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" From 54390eb3fe8c4766c22cbaba91ffae3885af00e3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 19:04:38 -0400 Subject: [PATCH 665/819] Release postgres-derive v0.4.4 --- postgres-derive/CHANGELOG.md | 6 ++++++ postgres-derive/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md index dde466a97..22714acc2 100644 --- a/postgres-derive/CHANGELOG.md +++ b/postgres-derive/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.4.4 - 2023-03-27 + +### Changed + +* Upgraded `syn`. + ## v0.4.3 - 2022-09-07 ### Added diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 8470bc8a9..535a64315 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-derive" -version = "0.4.3" +version = "0.4.4" authors = ["Steven Fackler "] license = "MIT/Apache-2.0" edition = "2018" From 117e387a3e44c13068dca613b36037c5de35d65c Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 19:14:49 -0400 Subject: [PATCH 666/819] Release tokio-postgres v0.7.8 --- tokio-postgres/CHANGELOG.md | 16 ++++++++++++++++ tokio-postgres/Cargo.toml | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 91e78b780..3345a1d43 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,21 @@ # Change Log +## v0.7.8 + +## Added + +* Added `keepalives_interval` and `keepalives_retries` config options. +* Added new `SqlState` variants. +* Added more `Debug` impls. +* Added `GenericClient::batch_execute`. +* Added `RowStream::rows_affected`. +* Added the `tcp_user_timeout` config option. + +## Changed + +* Passing an incorrect number of parameters to a query method now returns an error instead of panicking. +* Upgraded `socket2`. 
+ ## v0.7.7 - 2022-08-21 ## Added diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index c29852dec..e5451e2a2 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.7" +version = "0.7.8" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From cf9747e3e54f5d0e12deb23ded2d896c4d16de39 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 19:20:24 -0400 Subject: [PATCH 667/819] Add tcp_user_timeout to postgres --- postgres/src/config.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b541ec846..95c5ea417 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -44,6 +44,9 @@ use tokio_postgres::{Error, Socket}; /// omitted or the empty string. /// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. Note that hostnames /// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. +/// * `tcp_user_timeout` - The time limit that transmitted data may remain unacknowledged before a connection is forcibly closed. +/// This is ignored for Unix domain socket connections. It is only supported on systems where TCP_USER_TIMEOUT is available +/// and will default to the system default if omitted or set to 0; on other systems, it has no effect. /// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. /// This option is ignored when connecting with Unix sockets. Defaults to on. /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. @@ -256,6 +259,22 @@ impl Config { self.config.get_connect_timeout() } + /// Sets the TCP user timeout. + /// + /// This is ignored for Unix domain socket connections. It is only supported on systems where + /// TCP_USER_TIMEOUT is available and will default to the system default if omitted or set to 0; + /// on other systems, it has no effect. + pub fn tcp_user_timeout(&mut self, tcp_user_timeout: Duration) -> &mut Config { + self.config.tcp_user_timeout(tcp_user_timeout); + self + } + + /// Gets the TCP user timeout, if one has been set with the + /// `user_timeout` method. + pub fn get_tcp_user_timeout(&self) -> Option<&Duration> { + self.config.get_tcp_user_timeout() + } + /// Controls the use of TCP keepalive. /// /// This is ignored for Unix domain socket connections. Defaults to `true`. From 7d5962ef3ff811bbfe54cb069a9d0401cf1d92a5 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 19:24:48 -0400 Subject: [PATCH 668/819] Add RowIter::rows_affected --- postgres/src/row_iter.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/postgres/src/row_iter.rs b/postgres/src/row_iter.rs index 772e9893c..221fdfc68 100644 --- a/postgres/src/row_iter.rs +++ b/postgres/src/row_iter.rs @@ -17,6 +17,13 @@ impl<'a> RowIter<'a> { it: Box::pin(stream), } } + + /// Returns the number of rows affected by the query. + /// + /// This function will return `None` until the iterator has been exhausted. 
+ pub fn rows_affected(&self) -> Option { + self.it.rows_affected() + } } impl FallibleIterator for RowIter<'_> { From 65a68dfa463d4779e709234bbcacbf952f265223 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 27 Mar 2023 19:27:16 -0400 Subject: [PATCH 669/819] Release postgres v0.19.5 --- postgres/CHANGELOG.md | 12 ++++++++++++ postgres/Cargo.toml | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index c467c8b73..b8263a04a 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,17 @@ # Change Log +## v0.19.5 - 2023-03-27 + +## Added + +* Added `keepalives_interval` and `keepalives_retries` config options. +* Added the `tcp_user_timeout` config option. +* Added `RowIter::rows_affected`. + +## Changed + +* Passing an incorrect number of parameters to a query method now returns an error instead of panicking. + ## v0.19.4 - 2022-08-21 ## Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index e3a8663f2..e0b2a249d 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.19.4" +version = "0.19.5" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -39,7 +39,7 @@ with-time-0_3 = ["tokio-postgres/with-time-0_3"] bytes = "1.0" fallible-iterator = "0.2" futures-util = { version = "0.3.14", features = ["sink"] } -tokio-postgres = { version = "0.7.7", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.8", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" From f4b181a20180f1853351be53a32865b6209d0ab4 Mon Sep 17 00:00:00 2001 From: jaydenelliott Date: Tue, 28 Mar 2023 22:25:50 +1100 Subject: [PATCH 670/819] Rename_all attribute documentation --- postgres-derive-test/src/enums.rs | 10 +++++----- postgres-derive/src/fromsql.rs | 4 ++-- postgres-derive/src/tosql.rs | 4 ++-- postgres-types/src/lib.rs | 31 +++++++++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 9 deletions(-) diff --git a/postgres-derive-test/src/enums.rs b/postgres-derive-test/src/enums.rs index e44f37616..36d428437 100644 --- a/postgres-derive-test/src/enums.rs +++ b/postgres-derive-test/src/enums.rs @@ -58,15 +58,15 @@ fn rename_all_overrides() { #[derive(Debug, ToSql, FromSql, PartialEq)] #[postgres(name = "mood", rename_all = "snake_case")] enum Mood { - Sad, + VerySad, #[postgres(name = "okay")] Ok, - Happy, + VeryHappy, } let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); conn.execute( - "CREATE TYPE pg_temp.mood AS ENUM ('sad', 'okay', 'happy')", + "CREATE TYPE pg_temp.mood AS ENUM ('very_sad', 'okay', 'very_happy')", &[], ) .unwrap(); @@ -75,9 +75,9 @@ fn rename_all_overrides() { &mut conn, "mood", &[ - (Mood::Sad, "'sad'"), + (Mood::VerySad, "'very_sad'"), (Mood::Ok, "'okay'"), - (Mood::Happy, "'happy'"), + (Mood::VeryHappy, "'very_happy'"), ], ); } diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index 4deb23ed2..a9150411a 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -17,10 +17,10 @@ use crate::overrides::Overrides; pub fn expand_derive_fromsql(input: DeriveInput) -> Result { let overrides = Overrides::extract(&input.attrs, true)?; - if overrides.name.is_some() && overrides.transparent { + if (overrides.name.is_some() || overrides.rename_all.is_some()) && overrides.transparent { return Err(Error::new_spanned( &input, - "#[postgres(transparent)] is not allowed with 
#[postgres(name = \"...\")]", + "#[postgres(transparent)] is not allowed with #[postgres(name = \"...\")] or #[postgres(rename_all = \"...\")]", )); } diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index dbeeb16c3..ec7602312 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -15,10 +15,10 @@ use crate::overrides::Overrides; pub fn expand_derive_tosql(input: DeriveInput) -> Result { let overrides = Overrides::extract(&input.attrs, true)?; - if overrides.name.is_some() && overrides.transparent { + if (overrides.name.is_some() || overrides.rename_all.is_some()) && overrides.transparent { return Err(Error::new_spanned( &input, - "#[postgres(transparent)] is not allowed with #[postgres(name = \"...\")]", + "#[postgres(transparent)] is not allowed with #[postgres(name = \"...\")] or #[postgres(rename_all = \"...\")]", )); } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index fa49d99eb..5fca049a7 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -125,6 +125,37 @@ //! Happy, //! } //! ``` +//! +//! Alternatively, the `#[postgres(rename_all = "...")]` attribute can be used to rename all fields or variants +//! with the chosen casing convention. This will not affect the struct or enum's type name. Note that +//! `#[postgres(name = "...")]` takes precendence when used in conjunction with `#[postgres(rename_all = "...")]`: +//! +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! #[postgres(name = "mood", rename_all = "snake_case")] +//! enum Mood { +//! VerySad, // very_sad +//! #[postgres(name = "ok")] +//! Ok, // ok +//! VeryHappy, // very_happy +//! } +//! ``` +//! +//! The following case conventions are supported: +//! - `"lowercase"` +//! - `"UPPERCASE"` +//! - `"PascalCase"` +//! - `"camelCase"` +//! - `"snake_case"` +//! - `"SCREAMING_SNAKE_CASE"` +//! - `"kebab-case"` +//! - `"SCREAMING-KEBAB-CASE"` +//! 
- `"Train-Case"` + #![doc(html_root_url = "https://docs.rs/postgres-types/0.2")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] From b19fdd4b7ecab1e30e56f55dc95de8d53f9d14da Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Thu, 30 Mar 2023 19:30:40 -0400 Subject: [PATCH 671/819] Fix postgres-protocol constraint Closes #1012 --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index e5451e2a2..4dc93e3a2 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -53,7 +53,7 @@ parking_lot = "0.12" percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.11" -postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.5", path = "../postgres-protocol" } postgres-types = { version = "0.2.4", path = "../postgres-types" } socket2 = { version = "0.5", features = ["all"] } tokio = { version = "1.27", features = ["io-util"] } From 45d51d708c645f0ebbd3d0dcf5f3eaad3d461916 Mon Sep 17 00:00:00 2001 From: Niklas Hallqvist Date: Tue, 4 Apr 2023 14:27:45 +0200 Subject: [PATCH 672/819] OpenBSD misses some TCP keepalive options --- tokio-postgres/src/keepalive.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/keepalive.rs b/tokio-postgres/src/keepalive.rs index 74f453985..24d8d2c0e 100644 --- a/tokio-postgres/src/keepalive.rs +++ b/tokio-postgres/src/keepalive.rs @@ -12,12 +12,12 @@ impl From<&KeepaliveConfig> for TcpKeepalive { fn from(keepalive_config: &KeepaliveConfig) -> Self { let mut tcp_keepalive = Self::new().with_time(keepalive_config.idle); - #[cfg(not(any(target_os = "redox", target_os = "solaris")))] + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "openbsd")))] if let Some(interval) = keepalive_config.interval { tcp_keepalive = tcp_keepalive.with_interval(interval); } - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows")))] + #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows", target_os = "openbsd")))] if let Some(retries) = keepalive_config.retries { tcp_keepalive = tcp_keepalive.with_retries(retries); } From e59a16524190db45eead594c61b6a9012ad3a3b9 Mon Sep 17 00:00:00 2001 From: Niklas Hallqvist Date: Tue, 4 Apr 2023 15:43:39 +0200 Subject: [PATCH 673/819] rustfmt --- tokio-postgres/src/keepalive.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/keepalive.rs b/tokio-postgres/src/keepalive.rs index 24d8d2c0e..c409eb0ea 100644 --- a/tokio-postgres/src/keepalive.rs +++ b/tokio-postgres/src/keepalive.rs @@ -17,7 +17,12 @@ impl From<&KeepaliveConfig> for TcpKeepalive { tcp_keepalive = tcp_keepalive.with_interval(interval); } - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "windows", target_os = "openbsd")))] + #[cfg(not(any( + target_os = "redox", + target_os = "solaris", + target_os = "windows", + target_os = "openbsd" + )))] if let Some(retries) = keepalive_config.retries { tcp_keepalive = tcp_keepalive.with_retries(retries); } From a67fe643a9dc483530ba1df5cf09e3dfdec90c98 Mon Sep 17 00:00:00 2001 From: Basti Ortiz <39114273+BastiDood@users.noreply.github.com> Date: Fri, 7 Apr 2023 21:39:37 +0800 Subject: [PATCH 674/819] refactor(types): simplify `<&str as ToSql>::to_sql` --- postgres-types/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 
fa49d99eb..c34fbe66d 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -1012,10 +1012,10 @@ impl ToSql for Vec { impl<'a> ToSql for &'a str { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { - match *ty { - ref ty if ty.name() == "ltree" => types::ltree_to_sql(self, w), - ref ty if ty.name() == "lquery" => types::lquery_to_sql(self, w), - ref ty if ty.name() == "ltxtquery" => types::ltxtquery_to_sql(self, w), + match ty.name() { + "ltree" => types::ltree_to_sql(self, w), + "lquery" => types::lquery_to_sql(self, w), + "ltxtquery" => types::ltxtquery_to_sql(self, w), _ => types::text_to_sql(self, w), } Ok(IsNull::No) From 98abdf9fa25a2e908fd62c5961655e00989fafa2 Mon Sep 17 00:00:00 2001 From: Basti Ortiz <39114273+BastiDood@users.noreply.github.com> Date: Fri, 7 Apr 2023 21:43:25 +0800 Subject: [PATCH 675/819] refactor(types): prefer `matches!` macro for readability --- postgres-types/src/lib.rs | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index c34fbe66d..291e069da 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -1022,18 +1022,10 @@ impl<'a> ToSql for &'a str { } fn accepts(ty: &Type) -> bool { - match *ty { - Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true, - ref ty - if (ty.name() == "citext" - || ty.name() == "ltree" - || ty.name() == "lquery" - || ty.name() == "ltxtquery") => - { - true - } - _ => false, - } + matches!( + *ty, + Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN + ) || matches!(ty.name(), "citext" | "ltree" | "lquery" | "ltxtquery") } to_sql_checked!(); From e71335ee43978311b2c1f253afef6c92abdaac88 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 1 May 2023 19:33:49 -0400 Subject: [PATCH 676/819] fix serialization of oidvector --- postgres-types/src/lib.rs | 8 +++++++- tokio-postgres/src/connect_socket.rs | 4 +++- tokio-postgres/tests/test/types/mod.rs | 11 +++++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 291e069da..c4c448c4a 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -910,9 +910,15 @@ impl<'a, T: ToSql> ToSql for &'a [T] { _ => panic!("expected array type"), }; + // Arrays are normally one indexed by default but oidvector *requires* zero indexing + let lower_bound = match *ty { + Type::OID_VECTOR => 0, + _ => 1, + }; + let dimension = ArrayDimension { len: downcast(self.len())?, - lower_bound: 1, + lower_bound, }; types::array_to_sql( diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 9b3d31d72..1204ca1ff 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -14,7 +14,9 @@ pub(crate) async fn connect_socket( host: &Host, port: u16, connect_timeout: Option, - tcp_user_timeout: Option, + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] tcp_user_timeout: Option< + Duration, + >, keepalive_config: Option<&KeepaliveConfig>, ) -> Result { match host { diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 452d149fe..0f1d38242 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -739,3 +739,14 @@ async fn ltxtquery_any() { ) .await; } + +#[tokio::test] +async fn oidvector() { + test_type( + "oidvector", + // NB: postgres does not support empty oidarrays! 
All empty arrays are normalized to zero dimensions, but the + // oidvectorrecv function requires exactly one dimension. + &[(Some(vec![0u32, 1, 2]), "ARRAY[0,1,2]"), (None, "NULL")], + ) + .await; +} From d92b3b0a63e7abba41d56cebd06356d1a50db879 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 1 May 2023 19:45:54 -0400 Subject: [PATCH 677/819] Fix int2vector serialization --- postgres-types/src/lib.rs | 4 ++-- tokio-postgres/tests/test/types/mod.rs | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index c4c448c4a..b03c389a9 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -910,9 +910,9 @@ impl<'a, T: ToSql> ToSql for &'a [T] { _ => panic!("expected array type"), }; - // Arrays are normally one indexed by default but oidvector *requires* zero indexing + // Arrays are normally one indexed by default but oidvector and int2vector *require* zero indexing let lower_bound = match *ty { - Type::OID_VECTOR => 0, + Type::OID_VECTOR | Type::INT2_VECTOR => 0, _ => 1, }; diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index 0f1d38242..f1a44da08 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -750,3 +750,14 @@ async fn oidvector() { ) .await; } + +#[tokio::test] +async fn int2vector() { + test_type( + "int2vector", + // NB: postgres does not support empty int2vectors! All empty arrays are normalized to zero dimensions, but the + // oidvectorrecv function requires exactly one dimension. + &[(Some(vec![0i16, 1, 2]), "ARRAY[0,1,2]"), (None, "NULL")], + ) + .await; +} From 80adf0448b95548dabd8354ae6988f801e7a5965 Mon Sep 17 00:00:00 2001 From: Ibiyemi Abiodun Date: Sun, 7 May 2023 13:37:52 -0400 Subject: [PATCH 678/819] allow `BorrowToSql` for non-static `Box` --- postgres-types/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 291e069da..6517b4a95 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -1178,17 +1178,17 @@ impl BorrowToSql for &dyn ToSql { } } -impl sealed::Sealed for Box {} +impl<'a> sealed::Sealed for Box {} -impl BorrowToSql for Box { +impl<'a> BorrowToSql for Box { #[inline] fn borrow_to_sql(&self) -> &dyn ToSql { self.as_ref() } } -impl sealed::Sealed for Box {} -impl BorrowToSql for Box { +impl<'a> sealed::Sealed for Box {} +impl<'a> BorrowToSql for Box { #[inline] fn borrow_to_sql(&self) -> &dyn ToSql { self.as_ref() From 066b466f4443d0d51c6b1d409f3a2c93019ca27e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 7 May 2023 13:48:50 -0400 Subject: [PATCH 679/819] Update ci.yml --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8044b2f47..8e91c6faf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,7 +55,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.64.0 + version: 1.65.0 - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - uses: actions/cache@v3 From 40954901a422838800a0f99608bf0ab308e5e9aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 14:01:30 +0000 Subject: [PATCH 680/819] Update criterion requirement from 0.4 to 0.5 Updates the requirements on 
[criterion](https://github.com/bheisler/criterion.rs) to permit the latest version. - [Changelog](https://github.com/bheisler/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/bheisler/criterion.rs/compare/0.4.0...0.5.0) --- updated-dependencies: - dependency-name: criterion dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- postgres/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index e0b2a249d..044bb91e1 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -45,5 +45,5 @@ tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" [dev-dependencies] -criterion = "0.4" +criterion = "0.5" tokio = { version = "1.0", features = ["rt-multi-thread"] } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 4dc93e3a2..b5c6d0ae6 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -61,7 +61,7 @@ tokio-util = { version = "0.7", features = ["codec"] } [dev-dependencies] futures-executor = "0.3" -criterion = "0.4" +criterion = "0.5" env_logger = "0.10" tokio = { version = "1.0", features = [ "macros", From 64bf779f7c91524b820e60226a6b8c8075d2dfa4 Mon Sep 17 00:00:00 2001 From: Zeb Piasecki Date: Sat, 3 Jun 2023 09:18:58 -0400 Subject: [PATCH 681/819] feat: add support for wasm Adds support for compiling to WASM environments that provide JS via wasm-bindgen. Because there's no standardized socket API the caller must provide a connection that implements AsyncRead/AsyncWrite to connect_raw. --- Cargo.toml | 1 + postgres-protocol/Cargo.toml | 3 +++ tokio-postgres/Cargo.toml | 4 +++- tokio-postgres/src/config.rs | 42 ++++++++++++++++++++++++++---------- tokio-postgres/src/lib.rs | 1 + 5 files changed, 39 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4752836a7..80a7739c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" members = [ "codegen", "postgres", diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index e32211369..1c6422e7d 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -19,3 +19,6 @@ memchr = "2.0" rand = "0.8" sha2 = "0.10" stringprep = "0.1" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +getrandom = { version = "0.2.9", features = ["js"] } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index b5c6d0ae6..af0e6dee0 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -55,10 +55,12 @@ pin-project-lite = "0.2" phf = "0.11" postgres-protocol = { version = "0.6.5", path = "../postgres-protocol" } postgres-types = { version = "0.2.4", path = "../postgres-types" } -socket2 = { version = "0.5", features = ["all"] } tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +socket2 = { version = "0.5", features = ["all"] } + [dev-dependencies] futures-executor = "0.3" criterion = "0.5" diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index a8aa7a9f5..2b2be08ef 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -3,6 +3,7 @@ #[cfg(feature = "runtime")] use crate::connect::connect; use crate::connect_raw::connect_raw; +#[cfg(not(target_arch = "wasm32"))] use crate::keepalive::KeepaliveConfig; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; @@ -165,6 +166,7 @@ pub struct 
Config { pub(crate) connect_timeout: Option, pub(crate) tcp_user_timeout: Option, pub(crate) keepalives: bool, + #[cfg(not(target_arch = "wasm32"))] pub(crate) keepalive_config: KeepaliveConfig, pub(crate) target_session_attrs: TargetSessionAttrs, pub(crate) channel_binding: ChannelBinding, @@ -179,11 +181,6 @@ impl Default for Config { impl Config { /// Creates a new configuration. pub fn new() -> Config { - let keepalive_config = KeepaliveConfig { - idle: Duration::from_secs(2 * 60 * 60), - interval: None, - retries: None, - }; Config { user: None, password: None, @@ -196,7 +193,12 @@ impl Config { connect_timeout: None, tcp_user_timeout: None, keepalives: true, - keepalive_config, + #[cfg(not(target_arch = "wasm32"))] + keepalive_config: KeepaliveConfig { + idle: Duration::from_secs(2 * 60 * 60), + interval: None, + retries: None, + }, target_session_attrs: TargetSessionAttrs::Any, channel_binding: ChannelBinding::Prefer, } @@ -377,6 +379,7 @@ impl Config { /// Sets the amount of idle time before a keepalive packet is sent on the connection. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. + #[cfg(not(target_arch = "wasm32"))] pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { self.keepalive_config.idle = keepalives_idle; self @@ -384,6 +387,7 @@ impl Config { /// Gets the configured amount of idle time before a keepalive packet will /// be sent on the connection. + #[cfg(not(target_arch = "wasm32"))] pub fn get_keepalives_idle(&self) -> Duration { self.keepalive_config.idle } @@ -392,12 +396,14 @@ impl Config { /// On Windows, this sets the value of the tcp_keepalive struct’s keepaliveinterval field. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + #[cfg(not(target_arch = "wasm32"))] pub fn keepalives_interval(&mut self, keepalives_interval: Duration) -> &mut Config { self.keepalive_config.interval = Some(keepalives_interval); self } /// Gets the time interval between TCP keepalive probes. + #[cfg(not(target_arch = "wasm32"))] pub fn get_keepalives_interval(&self) -> Option { self.keepalive_config.interval } @@ -405,12 +411,14 @@ impl Config { /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + #[cfg(not(target_arch = "wasm32"))] pub fn keepalives_retries(&mut self, keepalives_retries: u32) -> &mut Config { self.keepalive_config.retries = Some(keepalives_retries); self } /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. 
+ #[cfg(not(target_arch = "wasm32"))] pub fn get_keepalives_retries(&self) -> Option { self.keepalive_config.retries } @@ -503,12 +511,14 @@ impl Config { self.tcp_user_timeout(Duration::from_secs(timeout as u64)); } } + #[cfg(not(target_arch = "wasm32"))] "keepalives" => { let keepalives = value .parse::() .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives"))))?; self.keepalives(keepalives != 0); } + #[cfg(not(target_arch = "wasm32"))] "keepalives_idle" => { let keepalives_idle = value .parse::() @@ -517,6 +527,7 @@ impl Config { self.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); } } + #[cfg(not(target_arch = "wasm32"))] "keepalives_interval" => { let keepalives_interval = value.parse::().map_err(|_| { Error::config_parse(Box::new(InvalidValue("keepalives_interval"))) @@ -525,6 +536,7 @@ impl Config { self.keepalives_interval(Duration::from_secs(keepalives_interval as u64)); } } + #[cfg(not(target_arch = "wasm32"))] "keepalives_retries" => { let keepalives_retries = value.parse::().map_err(|_| { Error::config_parse(Box::new(InvalidValue("keepalives_retries"))) @@ -614,7 +626,8 @@ impl fmt::Debug for Config { } } - f.debug_struct("Config") + let mut config_dbg = &mut f.debug_struct("Config"); + config_dbg = config_dbg .field("user", &self.user) .field("password", &self.password.as_ref().map(|_| Redaction {})) .field("dbname", &self.dbname) @@ -625,10 +638,17 @@ impl fmt::Debug for Config { .field("port", &self.port) .field("connect_timeout", &self.connect_timeout) .field("tcp_user_timeout", &self.tcp_user_timeout) - .field("keepalives", &self.keepalives) - .field("keepalives_idle", &self.keepalive_config.idle) - .field("keepalives_interval", &self.keepalive_config.interval) - .field("keepalives_retries", &self.keepalive_config.retries) + .field("keepalives", &self.keepalives); + + #[cfg(not(target_arch = "wasm32"))] + { + config_dbg = config_dbg + .field("keepalives_idle", &self.keepalive_config.idle) + .field("keepalives_interval", &self.keepalive_config.interval) + .field("keepalives_retries", &self.keepalive_config.retries); + } + + config_dbg .field("target_session_attrs", &self.target_session_attrs) .field("channel_binding", &self.channel_binding) .finish() diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index a9ecba4f1..2bb410187 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -163,6 +163,7 @@ mod copy_in; mod copy_out; pub mod error; mod generic_client; +#[cfg(not(target_arch = "wasm32"))] mod keepalive; mod maybe_tls_stream; mod portal; From 2230e88533acccf5632b2d43aff315c88a2507a2 Mon Sep 17 00:00:00 2001 From: Zeb Piasecki Date: Sat, 3 Jun 2023 17:32:48 -0400 Subject: [PATCH 682/819] add CI job for checking wasm Adds a CI job for ensuring the tokio-postgres crate builds on the wasm32-unknown-unknown target without the default features. 
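As an illustration of the wasm calling pattern described above, a minimal sketch, assuming the caller already holds some duplex transport and drives the connection with `wasm_bindgen_futures::spawn_local`; the transport and the executor are assumptions, not part of these patches:

```rust
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_postgres::{Config, NoTls};

// `stream` is whatever duplex transport the wasm host provides; tokio-postgres
// only requires AsyncRead + AsyncWrite + Unpin here.
async fn connect_over<S>(stream: S) -> Result<tokio_postgres::Client, tokio_postgres::Error>
where
    S: AsyncRead + AsyncWrite + Unpin + 'static,
{
    let config: Config = "user=postgres dbname=postgres".parse()?;
    // connect_raw bypasses the built-in socket handling entirely.
    let (client, connection) = config.connect_raw(stream, NoTls).await?;
    // The connection future still has to be polled; on wasm this is typically
    // handed to the browser's event loop via spawn_local.
    wasm_bindgen_futures::spawn_local(async move {
        let _ = connection.await;
    });
    Ok(client)
}
```
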
--- .github/workflows/ci.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8e91c6faf..46f97e48f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,6 +47,33 @@ jobs: key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y - run: cargo clippy --all --all-targets + check-wasm32: + name: check-wasm32 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: sfackler/actions/rustup@master + - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT + id: rust-version + - run: rustup target add wasm32-unknown-unknown + - uses: actions/cache@v3 + with: + path: ~/.cargo/registry/index + key: index-${{ runner.os }}-${{ github.run_number }} + restore-keys: | + index-${{ runner.os }}- + - run: cargo generate-lockfile + - uses: actions/cache@v3 + with: + path: ~/.cargo/registry/cache + key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} + - run: cargo fetch + - uses: actions/cache@v3 + with: + path: target + key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y + - run: cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features + test: name: test runs-on: ubuntu-latest From edc7fdecfb9f81b923bfe904edefd41e7076fa8c Mon Sep 17 00:00:00 2001 From: Zeb Piasecki Date: Sun, 4 Jun 2023 13:02:03 -0400 Subject: [PATCH 683/819] gate wasm support behind feature flag --- Cargo.toml | 1 - postgres-protocol/Cargo.toml | 8 +++++--- tokio-postgres/Cargo.toml | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 80a7739c8..4752836a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,4 @@ [workspace] -resolver = "2" members = [ "codegen", "postgres", diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index 1c6422e7d..ad609f6fa 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -8,6 +8,10 @@ license = "MIT/Apache-2.0" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" +[features] +default = [] +js = ["getrandom/js"] + [dependencies] base64 = "0.21" byteorder = "1.0" @@ -19,6 +23,4 @@ memchr = "2.0" rand = "0.8" sha2 = "0.10" stringprep = "0.1" - -[target.'cfg(target_arch = "wasm32")'.dependencies] -getrandom = { version = "0.2.9", features = ["js"] } +getrandom = { version = "0.2", optional = true } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index af0e6dee0..12d8a66fd 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -40,6 +40,7 @@ with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] with-uuid-1 = ["postgres-types/with-uuid-1"] with-time-0_2 = ["postgres-types/with-time-0_2"] with-time-0_3 = ["postgres-types/with-time-0_3"] +js = ["postgres-protocol/js"] [dependencies] async-trait = "0.1" From 1f8fb7a16c131ed50a46fc139838327e8a604775 Mon Sep 17 00:00:00 2001 From: Zeb Piasecki Date: Wed, 7 Jun 2023 21:17:54 -0400 Subject: [PATCH 684/819] ignore dev deps in wasm ci --- .github/workflows/ci.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 46f97e48f..99cf652d2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v3 - uses: sfackler/actions/rustup@master - uses: 
sfackler/actions/rustfmt@master - + clippy: name: clippy runs-on: ubuntu-latest @@ -72,7 +72,12 @@ jobs: with: path: target key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y - - run: cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features + - run: | + # Hack: wasm support currently relies on not having tokio with features like socket enabled. With resolver 1 + # dev dependencies can add unwanted dependencies to the build, so we'll hackily disable them for this check. + + sed -i 's/\[dev-dependencies]/[ignore-dependencies]/g' ./tokio-postgres/Cargo.toml + cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features test: name: test From 635bac4665d4a744a523e6d843f67ffed33b6cff Mon Sep 17 00:00:00 2001 From: Zeb Piasecki Date: Fri, 9 Jun 2023 11:15:06 -0400 Subject: [PATCH 685/819] specify js feature for wasm ci --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 99cf652d2..0064369c9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -77,7 +77,7 @@ jobs: # dev dependencies can add unwanted dependencies to the build, so we'll hackily disable them for this check. sed -i 's/\[dev-dependencies]/[ignore-dependencies]/g' ./tokio-postgres/Cargo.toml - cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features + cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features --features js test: name: test From 6f19bb9000bd5e53cd7613f0f96a24c3657533b6 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 10 Jun 2023 10:21:34 -0400 Subject: [PATCH 686/819] clean up wasm32 test --- .github/workflows/ci.yml | 9 ++------- Cargo.toml | 1 + 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0064369c9..ebe0f600f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,13 +71,8 @@ jobs: - uses: actions/cache@v3 with: path: target - key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y - - run: | - # Hack: wasm support currently relies on not having tokio with features like socket enabled. With resolver 1 - # dev dependencies can add unwanted dependencies to the build, so we'll hackily disable them for this check. 
- - sed -i 's/\[dev-dependencies]/[ignore-dependencies]/g' ./tokio-postgres/Cargo.toml - cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features --features js + key: check-wasm32-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} + - run: cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features --features js test: name: test diff --git a/Cargo.toml b/Cargo.toml index 4752836a7..16e3739dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "postgres-types", "tokio-postgres", ] +resolver = "2" [profile.release] debug = 2 From 258fe68f193b7951e20f244ecbbf664d7629f0eb Mon Sep 17 00:00:00 2001 From: Vinicius Hirschle Date: Sat, 29 Apr 2023 21:52:01 -0300 Subject: [PATCH 687/819] feat(derive): add `#[postgres(allow_mismatch)]` --- .../compile-fail/invalid-allow-mismatch.rs | 31 ++++++++ .../invalid-allow-mismatch.stderr | 43 +++++++++++ postgres-derive-test/src/enums.rs | 72 ++++++++++++++++++- postgres-derive/src/accepts.rs | 42 ++++++----- postgres-derive/src/fromsql.rs | 22 +++++- postgres-derive/src/overrides.rs | 22 +++++- postgres-derive/src/tosql.rs | 22 +++++- postgres-types/src/lib.rs | 23 +++++- 8 files changed, 250 insertions(+), 27 deletions(-) create mode 100644 postgres-derive-test/src/compile-fail/invalid-allow-mismatch.rs create mode 100644 postgres-derive-test/src/compile-fail/invalid-allow-mismatch.stderr diff --git a/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.rs b/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.rs new file mode 100644 index 000000000..52d0ba8f6 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.rs @@ -0,0 +1,31 @@ +use postgres_types::{FromSql, ToSql}; + +#[derive(ToSql, Debug)] +#[postgres(allow_mismatch)] +struct ToSqlAllowMismatchStruct { + a: i32, +} + +#[derive(FromSql, Debug)] +#[postgres(allow_mismatch)] +struct FromSqlAllowMismatchStruct { + a: i32, +} + +#[derive(ToSql, Debug)] +#[postgres(allow_mismatch)] +struct ToSqlAllowMismatchTupleStruct(i32, i32); + +#[derive(FromSql, Debug)] +#[postgres(allow_mismatch)] +struct FromSqlAllowMismatchTupleStruct(i32, i32); + +#[derive(FromSql, Debug)] +#[postgres(transparent, allow_mismatch)] +struct TransparentFromSqlAllowMismatchStruct(i32); + +#[derive(FromSql, Debug)] +#[postgres(allow_mismatch, transparent)] +struct AllowMismatchFromSqlTransparentStruct(i32); + +fn main() {} diff --git a/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.stderr b/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.stderr new file mode 100644 index 000000000..a8e573248 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.stderr @@ -0,0 +1,43 @@ +error: #[postgres(allow_mismatch)] may only be applied to enums + --> src/compile-fail/invalid-allow-mismatch.rs:4:1 + | +4 | / #[postgres(allow_mismatch)] +5 | | struct ToSqlAllowMismatchStruct { +6 | | a: i32, +7 | | } + | |_^ + +error: #[postgres(allow_mismatch)] may only be applied to enums + --> src/compile-fail/invalid-allow-mismatch.rs:10:1 + | +10 | / #[postgres(allow_mismatch)] +11 | | struct FromSqlAllowMismatchStruct { +12 | | a: i32, +13 | | } + | |_^ + +error: #[postgres(allow_mismatch)] may only be applied to enums + --> src/compile-fail/invalid-allow-mismatch.rs:16:1 + | +16 | / #[postgres(allow_mismatch)] +17 | | struct ToSqlAllowMismatchTupleStruct(i32, i32); + | 
|_______________________________________________^ + +error: #[postgres(allow_mismatch)] may only be applied to enums + --> src/compile-fail/invalid-allow-mismatch.rs:20:1 + | +20 | / #[postgres(allow_mismatch)] +21 | | struct FromSqlAllowMismatchTupleStruct(i32, i32); + | |_________________________________________________^ + +error: #[postgres(transparent)] is not allowed with #[postgres(allow_mismatch)] + --> src/compile-fail/invalid-allow-mismatch.rs:24:25 + | +24 | #[postgres(transparent, allow_mismatch)] + | ^^^^^^^^^^^^^^ + +error: #[postgres(allow_mismatch)] is not allowed with #[postgres(transparent)] + --> src/compile-fail/invalid-allow-mismatch.rs:28:28 + | +28 | #[postgres(allow_mismatch, transparent)] + | ^^^^^^^^^^^ diff --git a/postgres-derive-test/src/enums.rs b/postgres-derive-test/src/enums.rs index 36d428437..f3e6c488c 100644 --- a/postgres-derive-test/src/enums.rs +++ b/postgres-derive-test/src/enums.rs @@ -1,5 +1,5 @@ use crate::test_type; -use postgres::{Client, NoTls}; +use postgres::{error::DbError, Client, NoTls}; use postgres_types::{FromSql, ToSql, WrongType}; use std::error::Error; @@ -131,3 +131,73 @@ fn missing_variant() { let err = conn.execute("SELECT $1::foo", &[&Foo::Bar]).unwrap_err(); assert!(err.source().unwrap().is::()); } + +#[test] +fn allow_mismatch_enums() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(allow_mismatch)] + enum Foo { + Bar, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.\"Foo\" AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let row = conn.query_one("SELECT $1::\"Foo\"", &[&Foo::Bar]).unwrap(); + assert_eq!(row.get::<_, Foo>(0), Foo::Bar); +} + +#[test] +fn missing_enum_variant() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(allow_mismatch)] + enum Foo { + Bar, + Buz, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.\"Foo\" AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let err = conn + .query_one("SELECT $1::\"Foo\"", &[&Foo::Buz]) + .unwrap_err(); + assert!(err.source().unwrap().is::()); +} + +#[test] +fn allow_mismatch_and_renaming() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(name = "foo", allow_mismatch)] + enum Foo { + #[postgres(name = "bar")] + Bar, + #[postgres(name = "buz")] + Buz, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.foo AS ENUM ('bar', 'baz', 'buz')", &[]) + .unwrap(); + + let row = conn.query_one("SELECT $1::foo", &[&Foo::Buz]).unwrap(); + assert_eq!(row.get::<_, Foo>(0), Foo::Buz); +} + +#[test] +fn wrong_name_and_allow_mismatch() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(allow_mismatch)] + enum Foo { + Bar, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.foo AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let err = conn.query_one("SELECT $1::foo", &[&Foo::Bar]).unwrap_err(); + assert!(err.source().unwrap().is::()); +} diff --git a/postgres-derive/src/accepts.rs b/postgres-derive/src/accepts.rs index 63473863a..a68538dcc 100644 --- a/postgres-derive/src/accepts.rs +++ b/postgres-derive/src/accepts.rs @@ -31,31 +31,37 @@ pub fn domain_body(name: &str, field: &syn::Field) -> TokenStream { } } -pub fn enum_body(name: &str, variants: &[Variant]) -> TokenStream { +pub fn enum_body(name: &str, variants: &[Variant], 
allow_mismatch: bool) -> TokenStream { let num_variants = variants.len(); let variant_names = variants.iter().map(|v| &v.name); - quote! { - if type_.name() != #name { - return false; + if allow_mismatch { + quote! { + type_.name() == #name } + } else { + quote! { + if type_.name() != #name { + return false; + } - match *type_.kind() { - ::postgres_types::Kind::Enum(ref variants) => { - if variants.len() != #num_variants { - return false; - } - - variants.iter().all(|v| { - match &**v { - #( - #variant_names => true, - )* - _ => false, + match *type_.kind() { + ::postgres_types::Kind::Enum(ref variants) => { + if variants.len() != #num_variants { + return false; } - }) + + variants.iter().all(|v| { + match &**v { + #( + #variant_names => true, + )* + _ => false, + } + }) + } + _ => false, } - _ => false, } } } diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index a9150411a..d3ac47f4f 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -48,6 +48,26 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { )) } } + } else if overrides.allow_mismatch { + match input.data { + Data::Enum(ref data) => { + let variants = data + .variants + .iter() + .map(|variant| Variant::parse(variant, overrides.rename_all)) + .collect::, _>>()?; + ( + accepts::enum_body(&name, &variants, overrides.allow_mismatch), + enum_body(&input.ident, &variants), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[postgres(allow_mismatch)] may only be applied to enums", + )); + } + } } else { match input.data { Data::Enum(ref data) => { @@ -57,7 +77,7 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { .map(|variant| Variant::parse(variant, overrides.rename_all)) .collect::, _>>()?; ( - accepts::enum_body(&name, &variants), + accepts::enum_body(&name, &variants, overrides.allow_mismatch), enum_body(&input.ident, &variants), ) } diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs index 99faeebb7..d50550bee 100644 --- a/postgres-derive/src/overrides.rs +++ b/postgres-derive/src/overrides.rs @@ -7,6 +7,7 @@ pub struct Overrides { pub name: Option, pub rename_all: Option, pub transparent: bool, + pub allow_mismatch: bool, } impl Overrides { @@ -15,6 +16,7 @@ impl Overrides { name: None, rename_all: None, transparent: false, + allow_mismatch: false, }; for attr in attrs { @@ -74,11 +76,25 @@ impl Overrides { } } Meta::Path(path) => { - if !path.is_ident("transparent") { + if path.is_ident("transparent") { + if overrides.allow_mismatch { + return Err(Error::new_spanned( + path, + "#[postgres(allow_mismatch)] is not allowed with #[postgres(transparent)]", + )); + } + overrides.transparent = true; + } else if path.is_ident("allow_mismatch") { + if overrides.transparent { + return Err(Error::new_spanned( + path, + "#[postgres(transparent)] is not allowed with #[postgres(allow_mismatch)]", + )); + } + overrides.allow_mismatch = true; + } else { return Err(Error::new_spanned(path, "unknown override")); } - - overrides.transparent = true; } bad => return Err(Error::new_spanned(bad, "unknown attribute")), } diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index ec7602312..81d4834bf 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -44,6 +44,26 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { )); } } + } else if overrides.allow_mismatch { + match input.data { + Data::Enum(ref data) => { + let variants = data + .variants + .iter() + .map(|variant| 
Variant::parse(variant, overrides.rename_all)) + .collect::, _>>()?; + ( + accepts::enum_body(&name, &variants, overrides.allow_mismatch), + enum_body(&input.ident, &variants), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[postgres(allow_mismatch)] may only be applied to enums", + )); + } + } } else { match input.data { Data::Enum(ref data) => { @@ -53,7 +73,7 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { .map(|variant| Variant::parse(variant, overrides.rename_all)) .collect::, _>>()?; ( - accepts::enum_body(&name, &variants), + accepts::enum_body(&name, &variants, overrides.allow_mismatch), enum_body(&input.ident, &variants), ) } diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index edd723977..cb82e2f93 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -138,7 +138,6 @@ //! #[derive(Debug, ToSql, FromSql)] //! #[postgres(name = "mood", rename_all = "snake_case")] //! enum Mood { -//! VerySad, // very_sad //! #[postgres(name = "ok")] //! Ok, // ok //! VeryHappy, // very_happy @@ -155,10 +154,28 @@ //! - `"kebab-case"` //! - `"SCREAMING-KEBAB-CASE"` //! - `"Train-Case"` - +//! +//! ## Allowing Enum Mismatches +//! +//! By default the generated implementation of [`ToSql`] & [`FromSql`] for enums will require an exact match of the enum +//! variants between the Rust and Postgres types. +//! To allow mismatches, the `#[postgres(allow_mismatch)]` attribute can be used on the enum definition: +//! +//! ```sql +//! CREATE TYPE mood AS ENUM ( +//! 'Sad', +//! 'Ok', +//! 'Happy' +//! ); +//! ``` +//! #[postgres(allow_mismatch)] +//! enum Mood { +//! Happy, +//! Meh, +//! } +//! ``` #![doc(html_root_url = "https://docs.rs/postgres-types/0.2")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] - use fallible_iterator::FallibleIterator; use postgres_protocol::types::{self, ArrayDimension}; use std::any::type_name; From b09e9cc6426728a9df665992a6a1e8cb2c4afbec Mon Sep 17 00:00:00 2001 From: Andrew Baxter Date: Thu, 20 Jul 2023 22:54:19 +0900 Subject: [PATCH 688/819] Add to_sql for bytes Cow as well --- postgres-types/src/lib.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index edd723977..34c8cc0b8 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -1035,6 +1035,18 @@ impl ToSql for Box<[T]> { to_sql_checked!(); } +impl<'a> ToSql for Cow<'a, [u8]> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&str as ToSql>::to_sql(&self.as_ref(), ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&[u8] as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + impl ToSql for Vec { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[u8] as ToSql>::to_sql(&&**self, ty, w) From 34c8dc9d1957f6b663c4236217ec7134ad1d3c5b Mon Sep 17 00:00:00 2001 From: andrew <> Date: Thu, 20 Jul 2023 23:30:27 +0900 Subject: [PATCH 689/819] Fixes --- postgres-types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 34c8cc0b8..1f56c468f 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -1037,7 +1037,7 @@ impl ToSql for Box<[T]> { impl<'a> ToSql for Cow<'a, [u8]> { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { - <&str as ToSql>::to_sql(&self.as_ref(), ty, w) + <&[u8] as ToSql>::to_sql(&self.as_ref(), ty, w) } fn accepts(ty: &Type) -> bool { From f7a264473d8ba78a280f1fe173ecb9f3662be7f3 Mon Sep 17 00:00:00 2001 From: Steven 
Fackler Date: Sat, 22 Jul 2023 20:40:47 -0400 Subject: [PATCH 690/819] align hostaddr tls behavior with documentation --- tokio-postgres/src/cancel_query.rs | 14 +++++--------- tokio-postgres/src/cancel_query_raw.rs | 2 +- tokio-postgres/src/cancel_token.rs | 2 +- tokio-postgres/src/client.rs | 1 + tokio-postgres/src/config.rs | 6 +++--- tokio-postgres/src/connect.rs | 25 ++++++++++++++----------- tokio-postgres/src/connect_raw.rs | 2 +- tokio-postgres/src/connect_tls.rs | 9 +++++++-- 8 files changed, 33 insertions(+), 28 deletions(-) diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index d869b5824..8e35a4224 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -1,5 +1,5 @@ use crate::client::SocketConfig; -use crate::config::{Host, SslMode}; +use crate::config::SslMode; use crate::tls::MakeTlsConnect; use crate::{cancel_query_raw, connect_socket, Error, Socket}; use std::io; @@ -24,14 +24,10 @@ where } }; - let hostname = match &config.host { - Host::Tcp(host) => &**host, - // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter - #[cfg(unix)] - Host::Unix(_) => "", - }; - let tls = tls - .make_tls_connect(hostname) + let tls = config + .hostname + .map(|s| tls.make_tls_connect(&s)) + .transpose() .map_err(|e| Error::tls(e.into()))?; let socket = connect_socket::connect_socket( diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index c89dc581f..cae887183 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -8,7 +8,7 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; pub async fn cancel_query_raw( stream: S, mode: SslMode, - tls: T, + tls: Option, process_id: i32, secret_key: i32, ) -> Result<(), Error> diff --git a/tokio-postgres/src/cancel_token.rs b/tokio-postgres/src/cancel_token.rs index d048a3c82..9671de726 100644 --- a/tokio-postgres/src/cancel_token.rs +++ b/tokio-postgres/src/cancel_token.rs @@ -54,7 +54,7 @@ impl CancelToken { cancel_query_raw::cancel_query_raw( stream, self.ssl_mode, - tls, + Some(tls), self.process_id, self.secret_key, ) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 8b7df4e87..ac486813e 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -154,6 +154,7 @@ impl InnerClient { #[derive(Clone)] pub(crate) struct SocketConfig { pub host: Host, + pub hostname: Option, pub port: u16, pub connect_timeout: Option, pub tcp_user_timeout: Option, diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index b18e3b8af..c88c5ff35 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -97,9 +97,9 @@ pub enum Host { /// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, /// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. /// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, -/// - or if host specifies an IP address, that value will be used directly. +/// or if host specifies an IP address, that value will be used directly. /// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications -/// with time constraints. However, a host name is required for verify-full SSL certificate verification. +/// with time constraints. 
However, a host name is required for TLS certificate verification. /// Specifically: /// * If `hostaddr` is specified without `host`, the value for `hostaddr` gives the server network address. /// The connection attempt will fail if the authentication method requires a host name; @@ -645,7 +645,7 @@ impl Config { S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - connect_raw(stream, tls, self).await + connect_raw(stream, Some(tls), self).await } } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 32a0a76b9..abb1a0118 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -52,16 +52,17 @@ where .unwrap_or(5432); // The value of host is used as the hostname for TLS validation, - // if it's not present, use the value of hostaddr. let hostname = match host { - Some(Host::Tcp(host)) => host.clone(), + Some(Host::Tcp(host)) => Some(host.clone()), // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] - Some(Host::Unix(_)) => "".to_string(), - None => hostaddr.map_or("".to_string(), |ipaddr| ipaddr.to_string()), + Some(Host::Unix(_)) => None, + None => None, }; - let tls = tls - .make_tls_connect(&hostname) + let tls = hostname + .as_ref() + .map(|s| tls.make_tls_connect(s)) + .transpose() .map_err(|e| Error::tls(e.into()))?; // Try to use the value of hostaddr to establish the TCP connection, @@ -78,7 +79,7 @@ where } }; - match connect_once(&addr, port, tls, config).await { + match connect_once(addr, hostname, port, tls, config).await { Ok((client, connection)) => return Ok((client, connection)), Err(e) => error = Some(e), } @@ -88,16 +89,17 @@ where } async fn connect_once( - host: &Host, + host: Host, + hostname: Option, port: u16, - tls: T, + tls: Option, config: &Config, ) -> Result<(Client, Connection), Error> where T: TlsConnect, { let socket = connect_socket( - host, + &host, port, config.connect_timeout, config.tcp_user_timeout, @@ -151,7 +153,8 @@ where } client.set_socket_config(SocketConfig { - host: host.clone(), + host, + hostname, port, connect_timeout: config.connect_timeout, tcp_user_timeout: config.tcp_user_timeout, diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index d97636221..2db6a66b9 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -80,7 +80,7 @@ where pub async fn connect_raw( stream: S, - tls: T, + tls: Option, config: &Config, ) -> Result<(Client, Connection), Error> where diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs index 5ef21ac5c..d75dcde90 100644 --- a/tokio-postgres/src/connect_tls.rs +++ b/tokio-postgres/src/connect_tls.rs @@ -10,7 +10,7 @@ use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; pub async fn connect_tls( mut stream: S, mode: SslMode, - tls: T, + tls: Option, ) -> Result, Error> where S: AsyncRead + AsyncWrite + Unpin, @@ -18,7 +18,11 @@ where { match mode { SslMode::Disable => return Ok(MaybeTlsStream::Raw(stream)), - SslMode::Prefer if !tls.can_connect(ForcePrivateApi) => { + SslMode::Prefer + if tls + .as_ref() + .map_or(false, |tls| !tls.can_connect(ForcePrivateApi)) => + { return Ok(MaybeTlsStream::Raw(stream)) } SslMode::Prefer | SslMode::Require => {} @@ -40,6 +44,7 @@ where } let stream = tls + .ok_or_else(|| Error::tls("no hostname provided for TLS handshake".into()))? 
.connect(stream) .await .map_err(|e| Error::tls(e.into()))?; From b57574598ec0985d9b471144fe038886b6d8b92a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 22 Jul 2023 21:09:08 -0400 Subject: [PATCH 691/819] fix test --- tokio-postgres/src/cancel_query.rs | 10 +++++----- tokio-postgres/src/cancel_query_raw.rs | 5 +++-- tokio-postgres/src/cancel_token.rs | 3 ++- tokio-postgres/src/config.rs | 2 +- tokio-postgres/src/connect.rs | 11 +++++------ tokio-postgres/src/connect_raw.rs | 5 +++-- tokio-postgres/src/connect_tls.rs | 14 +++++++------- 7 files changed, 26 insertions(+), 24 deletions(-) diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index 8e35a4224..4a7766d60 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -24,11 +24,10 @@ where } }; - let tls = config - .hostname - .map(|s| tls.make_tls_connect(&s)) - .transpose() + let tls = tls + .make_tls_connect(config.hostname.as_deref().unwrap_or("")) .map_err(|e| Error::tls(e.into()))?; + let has_hostname = config.hostname.is_some(); let socket = connect_socket::connect_socket( &config.host, @@ -39,5 +38,6 @@ where ) .await?; - cancel_query_raw::cancel_query_raw(socket, ssl_mode, tls, process_id, secret_key).await + cancel_query_raw::cancel_query_raw(socket, ssl_mode, tls, has_hostname, process_id, secret_key) + .await } diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index cae887183..41aafe7d9 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -8,7 +8,8 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; pub async fn cancel_query_raw( stream: S, mode: SslMode, - tls: Option, + tls: T, + has_hostname: bool, process_id: i32, secret_key: i32, ) -> Result<(), Error> @@ -16,7 +17,7 @@ where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - let mut stream = connect_tls::connect_tls(stream, mode, tls).await?; + let mut stream = connect_tls::connect_tls(stream, mode, tls, has_hostname).await?; let mut buf = BytesMut::new(); frontend::cancel_request(process_id, secret_key, &mut buf); diff --git a/tokio-postgres/src/cancel_token.rs b/tokio-postgres/src/cancel_token.rs index 9671de726..c925ce0ca 100644 --- a/tokio-postgres/src/cancel_token.rs +++ b/tokio-postgres/src/cancel_token.rs @@ -54,7 +54,8 @@ impl CancelToken { cancel_query_raw::cancel_query_raw( stream, self.ssl_mode, - Some(tls), + tls, + true, self.process_id, self.secret_key, ) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index c88c5ff35..a7fa19312 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -645,7 +645,7 @@ impl Config { S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - connect_raw(stream, Some(tls), self).await + connect_raw(stream, tls, true, self).await } } diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index abb1a0118..441ad1238 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -59,10 +59,8 @@ where Some(Host::Unix(_)) => None, None => None, }; - let tls = hostname - .as_ref() - .map(|s| tls.make_tls_connect(s)) - .transpose() + let tls = tls + .make_tls_connect(hostname.as_deref().unwrap_or("")) .map_err(|e| Error::tls(e.into()))?; // Try to use the value of hostaddr to establish the TCP connection, @@ -92,7 +90,7 @@ async fn connect_once( host: Host, hostname: Option, port: u16, - tls: Option, + tls: T, config: &Config, ) -> Result<(Client, Connection), Error> where @@ 
-110,7 +108,8 @@ where }, ) .await?; - let (mut client, mut connection) = connect_raw(socket, tls, config).await?; + let has_hostname = hostname.is_some(); + let (mut client, mut connection) = connect_raw(socket, tls, has_hostname, config).await?; if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { let rows = client.simple_query_raw("SHOW transaction_read_only"); diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 2db6a66b9..254ca9f0c 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -80,14 +80,15 @@ where pub async fn connect_raw( stream: S, - tls: Option, + tls: T, + has_hostname: bool, config: &Config, ) -> Result<(Client, Connection), Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - let stream = connect_tls(stream, config.ssl_mode, tls).await?; + let stream = connect_tls(stream, config.ssl_mode, tls, has_hostname).await?; let mut stream = StartupStream { inner: Framed::new(stream, PostgresCodec), diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs index d75dcde90..2b1229125 100644 --- a/tokio-postgres/src/connect_tls.rs +++ b/tokio-postgres/src/connect_tls.rs @@ -10,7 +10,8 @@ use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; pub async fn connect_tls( mut stream: S, mode: SslMode, - tls: Option, + tls: T, + has_hostname: bool, ) -> Result, Error> where S: AsyncRead + AsyncWrite + Unpin, @@ -18,11 +19,7 @@ where { match mode { SslMode::Disable => return Ok(MaybeTlsStream::Raw(stream)), - SslMode::Prefer - if tls - .as_ref() - .map_or(false, |tls| !tls.can_connect(ForcePrivateApi)) => - { + SslMode::Prefer if !tls.can_connect(ForcePrivateApi) => { return Ok(MaybeTlsStream::Raw(stream)) } SslMode::Prefer | SslMode::Require => {} @@ -43,8 +40,11 @@ where } } + if !has_hostname { + return Err(Error::tls("no hostname provided for TLS handshake".into())); + } + let stream = tls - .ok_or_else(|| Error::tls("no hostname provided for TLS handshake".into()))? 
.connect(stream) .await .map_err(|e| Error::tls(e.into()))?; From 3346858dd26b20d63eaae8f3db86773b6896b4c3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Jul 2023 09:52:56 -0400 Subject: [PATCH 692/819] Implement load balancing --- tokio-postgres/Cargo.toml | 1 + tokio-postgres/src/cancel_query.rs | 2 +- tokio-postgres/src/client.rs | 14 ++++- tokio-postgres/src/config.rs | 43 +++++++++++++ tokio-postgres/src/connect.rs | 93 +++++++++++++++++++++------- tokio-postgres/src/connect_socket.rs | 65 +++++++------------ 6 files changed, 149 insertions(+), 69 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 12d8a66fd..12c4bd689 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -58,6 +58,7 @@ postgres-protocol = { version = "0.6.5", path = "../postgres-protocol" } postgres-types = { version = "0.2.4", path = "../postgres-types" } tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } +rand = "0.8.5" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] socket2 = { version = "0.5", features = ["all"] } diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index 4a7766d60..078d4b8b6 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -30,7 +30,7 @@ where let has_hostname = config.hostname.is_some(); let socket = connect_socket::connect_socket( - &config.host, + &config.addr, config.port, config.connect_timeout, config.tcp_user_timeout, diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index ac486813e..2185d2146 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,6 +1,4 @@ use crate::codec::{BackendMessages, FrontendMessage}; -#[cfg(feature = "runtime")] -use crate::config::Host; use crate::config::SslMode; use crate::connection::{Request, RequestMessages}; use crate::copy_out::CopyOutStream; @@ -27,6 +25,8 @@ use postgres_protocol::message::{backend::Message, frontend}; use postgres_types::BorrowToSql; use std::collections::HashMap; use std::fmt; +use std::net::IpAddr; +use std::path::PathBuf; use std::sync::Arc; use std::task::{Context, Poll}; #[cfg(feature = "runtime")] @@ -153,7 +153,7 @@ impl InnerClient { #[cfg(feature = "runtime")] #[derive(Clone)] pub(crate) struct SocketConfig { - pub host: Host, + pub addr: Addr, pub hostname: Option, pub port: u16, pub connect_timeout: Option, @@ -161,6 +161,14 @@ pub(crate) struct SocketConfig { pub keepalive: Option, } +#[cfg(feature = "runtime")] +#[derive(Clone)] +pub(crate) enum Addr { + Tcp(IpAddr), + #[cfg(unix)] + Unix(PathBuf), +} + /// An asynchronous PostgreSQL client. /// /// The client is one half of what is returned when a connection is established. Users interact with the database diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index a7fa19312..87d77d35a 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -60,6 +60,16 @@ pub enum ChannelBinding { Require, } +/// Load balancing configuration. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[non_exhaustive] +pub enum LoadBalanceHosts { + /// Make connection attempts to hosts in the order provided. + Disable, + /// Make connection attempts to hosts in a random order. + Random, +} + /// A host specification. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Host { @@ -129,6 +139,12 @@ pub enum Host { /// * `channel_binding` - Controls usage of channel binding in the authentication process. 
If set to `disable`, channel /// binding will not be used. If set to `prefer`, channel binding will be used if available, but not used otherwise. /// If set to `require`, the authentication process will fail if channel binding is not used. Defaults to `prefer`. +/// * `load_balance_hosts` - Controls the order in which the client tries to connect to the available hosts and +/// addresses. Once a connection attempt is successful no other hosts and addresses will be tried. This parameter +/// is typically used in combination with multiple host names or a DNS record that returns multiple IPs. If set to +/// `disable`, hosts and addresses will be tried in the order provided. If set to `random`, hosts will be tried +/// in a random order, and the IP addresses resolved from a hostname will also be tried in a random order. Defaults +/// to `disable`. /// /// ## Examples /// @@ -190,6 +206,7 @@ pub struct Config { pub(crate) keepalive_config: KeepaliveConfig, pub(crate) target_session_attrs: TargetSessionAttrs, pub(crate) channel_binding: ChannelBinding, + pub(crate) load_balance_hosts: LoadBalanceHosts, } impl Default for Config { @@ -222,6 +239,7 @@ impl Config { }, target_session_attrs: TargetSessionAttrs::Any, channel_binding: ChannelBinding::Prefer, + load_balance_hosts: LoadBalanceHosts::Disable, } } @@ -489,6 +507,19 @@ impl Config { self.channel_binding } + /// Sets the host load balancing behavior. + /// + /// Defaults to `disable`. + pub fn load_balance_hosts(&mut self, load_balance_hosts: LoadBalanceHosts) -> &mut Config { + self.load_balance_hosts = load_balance_hosts; + self + } + + /// Gets the host load balancing behavior. + pub fn get_load_balance_hosts(&self) -> LoadBalanceHosts { + self.load_balance_hosts + } + fn param(&mut self, key: &str, value: &str) -> Result<(), Error> { match key { "user" => { @@ -612,6 +643,18 @@ impl Config { }; self.channel_binding(channel_binding); } + "load_balance_hosts" => { + let load_balance_hosts = match value { + "disable" => LoadBalanceHosts::Disable, + "random" => LoadBalanceHosts::Random, + _ => { + return Err(Error::config_parse(Box::new(InvalidValue( + "load_balance_hosts", + )))) + } + }; + self.load_balance_hosts(load_balance_hosts); + } key => { return Err(Error::config_parse(Box::new(UnknownOption( key.to_string(), diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index 441ad1238..ca57b9cdd 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -1,12 +1,14 @@ -use crate::client::SocketConfig; -use crate::config::{Host, TargetSessionAttrs}; +use crate::client::{Addr, SocketConfig}; +use crate::config::{Host, LoadBalanceHosts, TargetSessionAttrs}; use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; -use crate::tls::{MakeTlsConnect, TlsConnect}; +use crate::tls::MakeTlsConnect; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; use futures_util::{future, pin_mut, Future, FutureExt, Stream}; +use rand::seq::SliceRandom; use std::task::Poll; use std::{cmp, io}; +use tokio::net; pub async fn connect( mut tls: T, @@ -40,8 +42,13 @@ where return Err(Error::config("invalid number of ports".into())); } + let mut indices = (0..num_hosts).collect::>(); + if config.load_balance_hosts == LoadBalanceHosts::Random { + indices.shuffle(&mut rand::thread_rng()); + } + let mut error = None; - for i in 0..num_hosts { + for i in indices { let host = config.host.get(i); let hostaddr = config.hostaddr.get(i); let port = config @@ -59,25 +66,15 @@ where 
Some(Host::Unix(_)) => None, None => None, }; - let tls = tls - .make_tls_connect(hostname.as_deref().unwrap_or("")) - .map_err(|e| Error::tls(e.into()))?; // Try to use the value of hostaddr to establish the TCP connection, // fallback to host if hostaddr is not present. let addr = match hostaddr { Some(ipaddr) => Host::Tcp(ipaddr.to_string()), - None => { - if let Some(host) = host { - host.clone() - } else { - // This is unreachable. - return Err(Error::config("both host and hostaddr are empty".into())); - } - } + None => host.cloned().unwrap(), }; - match connect_once(addr, hostname, port, tls, config).await { + match connect_host(addr, hostname, port, &mut tls, config).await { Ok((client, connection)) => return Ok((client, connection)), Err(e) => error = Some(e), } @@ -86,18 +83,66 @@ where Err(error.unwrap()) } -async fn connect_once( +async fn connect_host( host: Host, hostname: Option, port: u16, - tls: T, + tls: &mut T, + config: &Config, +) -> Result<(Client, Connection), Error> +where + T: MakeTlsConnect, +{ + match host { + Host::Tcp(host) => { + let mut addrs = net::lookup_host((&*host, port)) + .await + .map_err(Error::connect)? + .collect::>(); + + if config.load_balance_hosts == LoadBalanceHosts::Random { + addrs.shuffle(&mut rand::thread_rng()); + } + + let mut last_err = None; + for addr in addrs { + match connect_once(Addr::Tcp(addr.ip()), hostname.as_deref(), port, tls, config) + .await + { + Ok(stream) => return Ok(stream), + Err(e) => { + last_err = Some(e); + continue; + } + }; + } + + Err(last_err.unwrap_or_else(|| { + Error::connect(io::Error::new( + io::ErrorKind::InvalidInput, + "could not resolve any addresses", + )) + })) + } + #[cfg(unix)] + Host::Unix(path) => { + connect_once(Addr::Unix(path), hostname.as_deref(), port, tls, config).await + } + } +} + +async fn connect_once( + addr: Addr, + hostname: Option<&str>, + port: u16, + tls: &mut T, config: &Config, ) -> Result<(Client, Connection), Error> where - T: TlsConnect, + T: MakeTlsConnect, { let socket = connect_socket( - &host, + &addr, port, config.connect_timeout, config.tcp_user_timeout, @@ -108,6 +153,10 @@ where }, ) .await?; + + let tls = tls + .make_tls_connect(hostname.unwrap_or("")) + .map_err(|e| Error::tls(e.into()))?; let has_hostname = hostname.is_some(); let (mut client, mut connection) = connect_raw(socket, tls, has_hostname, config).await?; @@ -152,8 +201,8 @@ where } client.set_socket_config(SocketConfig { - host, - hostname, + addr, + hostname: hostname.map(|s| s.to_string()), port, connect_timeout: config.connect_timeout, tcp_user_timeout: config.tcp_user_timeout, diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 1204ca1ff..082cad5dc 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -1,17 +1,17 @@ -use crate::config::Host; +use crate::client::Addr; use crate::keepalive::KeepaliveConfig; use crate::{Error, Socket}; use socket2::{SockRef, TcpKeepalive}; use std::future::Future; use std::io; use std::time::Duration; +use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; -use tokio::net::{self, TcpStream}; use tokio::time; pub(crate) async fn connect_socket( - host: &Host, + addr: &Addr, port: u16, connect_timeout: Option, #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] tcp_user_timeout: Option< @@ -19,53 +19,32 @@ pub(crate) async fn connect_socket( >, keepalive_config: Option<&KeepaliveConfig>, ) -> Result { - match host { - Host::Tcp(host) => { - let addrs = 
net::lookup_host((&**host, port)) - .await - .map_err(Error::connect)?; + match addr { + Addr::Tcp(ip) => { + let stream = + connect_with_timeout(TcpStream::connect((*ip, port)), connect_timeout).await?; - let mut last_err = None; + stream.set_nodelay(true).map_err(Error::connect)?; - for addr in addrs { - let stream = - match connect_with_timeout(TcpStream::connect(addr), connect_timeout).await { - Ok(stream) => stream, - Err(e) => { - last_err = Some(e); - continue; - } - }; - - stream.set_nodelay(true).map_err(Error::connect)?; - - let sock_ref = SockRef::from(&stream); - #[cfg(target_os = "linux")] - { - sock_ref - .set_tcp_user_timeout(tcp_user_timeout) - .map_err(Error::connect)?; - } - - if let Some(keepalive_config) = keepalive_config { - sock_ref - .set_tcp_keepalive(&TcpKeepalive::from(keepalive_config)) - .map_err(Error::connect)?; - } + let sock_ref = SockRef::from(&stream); + #[cfg(target_os = "linux")] + { + sock_ref + .set_tcp_user_timeout(tcp_user_timeout) + .map_err(Error::connect)?; + } - return Ok(Socket::new_tcp(stream)); + if let Some(keepalive_config) = keepalive_config { + sock_ref + .set_tcp_keepalive(&TcpKeepalive::from(keepalive_config)) + .map_err(Error::connect)?; } - Err(last_err.unwrap_or_else(|| { - Error::connect(io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve any addresses", - )) - })) + return Ok(Socket::new_tcp(stream)); } #[cfg(unix)] - Host::Unix(path) => { - let path = path.join(format!(".s.PGSQL.{}", port)); + Addr::Unix(dir) => { + let path = dir.join(format!(".s.PGSQL.{}", port)); let socket = connect_with_timeout(UnixStream::connect(path), connect_timeout).await?; Ok(Socket::new_unix(socket)) } From babc8562276cb51288671530045faa094ee7f35d Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Jul 2023 09:55:27 -0400 Subject: [PATCH 693/819] clippy --- tokio-postgres/src/connect_socket.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 082cad5dc..f27131178 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -40,7 +40,7 @@ pub(crate) async fn connect_socket( .map_err(Error::connect)?; } - return Ok(Socket::new_tcp(stream)); + Ok(Socket::new_tcp(stream)) } #[cfg(unix)] Addr::Unix(dir) => { From 84aed6312fb01ffa7664290b86af5e442ed8f6e9 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 23 Jul 2023 09:56:32 -0400 Subject: [PATCH 694/819] fix wasm build --- tokio-postgres/src/client.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 2185d2146..427a05049 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -25,7 +25,9 @@ use postgres_protocol::message::{backend::Message, frontend}; use postgres_types::BorrowToSql; use std::collections::HashMap; use std::fmt; +#[cfg(feature = "runtime")] use std::net::IpAddr; +#[cfg(feature = "runtime")] use std::path::PathBuf; use std::sync::Arc; use std::task::{Context, Poll}; From 98814b86bbe1c0daac2f29ffd55c675199b1877a Mon Sep 17 00:00:00 2001 From: Sebastian Schmidt Date: Sat, 19 Aug 2023 16:22:18 +0300 Subject: [PATCH 695/819] Set user to executing processes' user by default. This mimics the behaviour of libpq and some other libraries (see #1024). This commit uses the `whoami` crate, and thus goes as far as defaulting the user to the executing process' user name on all operating systems. 
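As a rough sketch of the intended behaviour (not the code in this patch), the fallback amounts to something like the helper below; `effective_user` is a hypothetical name used only for illustration, and it assumes the `whoami` crate that this commit adds as a dependency:

    // Illustrative only: resolve the user name to send in the startup message,
    // preferring an explicitly configured value and otherwise falling back to
    // the OS-level user of the running process via the `whoami` crate.
    fn effective_user(configured: Option<&str>) -> String {
        match configured {
            Some(user) => user.to_string(),
            None => whoami::username(),
        }
    }

With this in place a config that never calls `user(...)` still produces a non-empty user name, matching what libpq does.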
--- tokio-postgres/Cargo.toml | 1 + tokio-postgres/src/config.rs | 21 +++++++++++---------- tokio-postgres/src/connect_raw.rs | 9 ++------- 3 files changed, 14 insertions(+), 17 deletions(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 12c4bd689..29cf26829 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -59,6 +59,7 @@ postgres-types = { version = "0.2.4", path = "../postgres-types" } tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } rand = "0.8.5" +whoami = "1.4.1" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] socket2 = { version = "0.5", features = ["all"] } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 87d77d35a..a94667dc9 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -93,7 +93,7 @@ pub enum Host { /// /// ## Keys /// -/// * `user` - The username to authenticate with. Required. +/// * `user` - The username to authenticate with. Defaults to the user executing this process. /// * `password` - The password to authenticate with. /// * `dbname` - The name of the database to connect to. Defaults to the username. /// * `options` - Command line options used to configure the server. @@ -190,7 +190,7 @@ pub enum Host { /// ``` #[derive(Clone, PartialEq, Eq)] pub struct Config { - pub(crate) user: Option, + user: String, pub(crate) password: Option>, pub(crate) dbname: Option, pub(crate) options: Option, @@ -219,7 +219,7 @@ impl Config { /// Creates a new configuration. pub fn new() -> Config { Config { - user: None, + user: whoami::username(), password: None, dbname: None, options: None, @@ -245,16 +245,17 @@ impl Config { /// Sets the user to authenticate with. /// - /// Required. + /// If the user is not set, then this defaults to the user executing this process. pub fn user(&mut self, user: &str) -> &mut Config { - self.user = Some(user.to_string()); + self.user = user.to_string(); self } - /// Gets the user to authenticate with, if one has been configured with - /// the `user` method. - pub fn get_user(&self) -> Option<&str> { - self.user.as_deref() + /// Gets the user to authenticate with. + /// If no user has been configured with the [`user`](Config::user) method, + /// then this defaults to the user executing this process. + pub fn get_user(&self) -> &str { + &self.user } /// Sets the password to authenticate with. 
@@ -1124,7 +1125,7 @@ mod tests { fn test_simple_parsing() { let s = "user=pass_user dbname=postgres host=host1,host2 hostaddr=127.0.0.1,127.0.0.2 port=26257"; let config = s.parse::().unwrap(); - assert_eq!(Some("pass_user"), config.get_user()); + assert_eq!("pass_user", config.get_user()); assert_eq!(Some("postgres"), config.get_dbname()); assert_eq!( [ diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 254ca9f0c..bb511c47e 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -113,9 +113,7 @@ where T: AsyncRead + AsyncWrite + Unpin, { let mut params = vec![("client_encoding", "UTF8")]; - if let Some(user) = &config.user { - params.push(("user", &**user)); - } + params.push(("user", config.get_user())); if let Some(dbname) = &config.dbname { params.push(("database", &**dbname)); } @@ -158,10 +156,7 @@ where Some(Message::AuthenticationMd5Password(body)) => { can_skip_channel_binding(config)?; - let user = config - .user - .as_ref() - .ok_or_else(|| Error::config("user missing".into()))?; + let user = config.get_user(); let pass = config .password .as_ref() From 4c4059a63d273b94badf1c90998ffaa7c67091c0 Mon Sep 17 00:00:00 2001 From: Sebastian Schmidt Date: Sat, 19 Aug 2023 18:48:57 +0300 Subject: [PATCH 696/819] Propagate changes from `tokio-postgres` to `postgres`. --- postgres/src/config.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 2a8e63862..0e1fbde62 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -29,7 +29,7 @@ use tokio_postgres::{Error, Socket}; /// /// ## Keys /// -/// * `user` - The username to authenticate with. Required. +/// * `user` - The username to authenticate with. Defaults to the user executing this process. /// * `password` - The password to authenticate with. /// * `dbname` - The name of the database to connect to. Defaults to the username. /// * `options` - Command line options used to configure the server. @@ -143,15 +143,16 @@ impl Config { /// Sets the user to authenticate with. /// - /// Required. + /// If the user is not set, then this defaults to the user executing this process. pub fn user(&mut self, user: &str) -> &mut Config { self.config.user(user); self } - /// Gets the user to authenticate with, if one has been configured with - /// the `user` method. - pub fn get_user(&self) -> Option<&str> { + /// Gets the user to authenticate with. + /// If no user has been configured with the [`user`](Config::user) method, + /// then this defaults to the user executing this process. + pub fn get_user(&self) -> &str { self.config.get_user() } From 7a5b19a7861d784a0a743f89447d4c732ac44b90 Mon Sep 17 00:00:00 2001 From: Sebastian Schmidt Date: Sat, 19 Aug 2023 19:09:00 +0300 Subject: [PATCH 697/819] Update Rust version in CI to 1.67.0. 
--- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ebe0f600f..9a669a40f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,7 +82,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.65.0 + version: 1.67.0 - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - uses: actions/cache@v3 From a4543783707cc2fdbba3db4bfe1fc6168582de7e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Aug 2023 19:53:26 -0400 Subject: [PATCH 698/819] Restore back compat --- postgres/src/config.rs | 7 +++++-- tokio-postgres/src/config.rs | 15 +++++++++------ tokio-postgres/src/connect_raw.rs | 4 ++-- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 0e1fbde62..1839c9cb3 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -150,9 +150,12 @@ impl Config { } /// Gets the user to authenticate with. + /// /// If no user has been configured with the [`user`](Config::user) method, - /// then this defaults to the user executing this process. - pub fn get_user(&self) -> &str { + /// then this defaults to the user executing this process. It always + /// returns `Some`. + // FIXME remove option + pub fn get_user(&self) -> Option<&str> { self.config.get_user() } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index a94667dc9..0da5fc689 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -190,7 +190,7 @@ pub enum Host { /// ``` #[derive(Clone, PartialEq, Eq)] pub struct Config { - user: String, + pub(crate) user: String, pub(crate) password: Option>, pub(crate) dbname: Option, pub(crate) options: Option, @@ -245,17 +245,20 @@ impl Config { /// Sets the user to authenticate with. /// - /// If the user is not set, then this defaults to the user executing this process. + /// Defaults to the user executing this process. pub fn user(&mut self, user: &str) -> &mut Config { self.user = user.to_string(); self } /// Gets the user to authenticate with. + /// /// If no user has been configured with the [`user`](Config::user) method, - /// then this defaults to the user executing this process. - pub fn get_user(&self) -> &str { - &self.user + /// then this defaults to the user executing this process. It always + /// returns `Some`. + // FIXME remove option + pub fn get_user(&self) -> Option<&str> { + Some(&self.user) } /// Sets the password to authenticate with. 
@@ -1125,7 +1128,7 @@ mod tests { fn test_simple_parsing() { let s = "user=pass_user dbname=postgres host=host1,host2 hostaddr=127.0.0.1,127.0.0.2 port=26257"; let config = s.parse::().unwrap(); - assert_eq!("pass_user", config.get_user()); + assert_eq!(Some("pass_user"), config.get_user()); assert_eq!(Some("postgres"), config.get_dbname()); assert_eq!( [ diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index bb511c47e..11cc48ef8 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -113,7 +113,7 @@ where T: AsyncRead + AsyncWrite + Unpin, { let mut params = vec![("client_encoding", "UTF8")]; - params.push(("user", config.get_user())); + params.push(("user", &config.user)); if let Some(dbname) = &config.dbname { params.push(("database", &**dbname)); } @@ -156,7 +156,7 @@ where Some(Message::AuthenticationMd5Password(body)) => { can_skip_channel_binding(config)?; - let user = config.get_user(); + let user = &config.user; let pass = config .password .as_ref() From 496f46c8f5e8e76e0b148c7ef57dbccc11778597 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Aug 2023 20:04:18 -0400 Subject: [PATCH 699/819] Release postgres-protocol v0.6.6 --- postgres-protocol/CHANGELOG.md | 6 ++++++ postgres-protocol/Cargo.toml | 2 +- postgres-protocol/src/lib.rs | 1 - postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 034fd637c..1c371675c 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.6.6 -2023-08-19 + +### Added + +* Added the `js` feature for WASM support. + ## v0.6.5 - 2023-03-27 ### Added diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index ad609f6fa..b44994811 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.6.5" +version = "0.6.6" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index 8b6ff508d..83d9bf55c 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -9,7 +9,6 @@ //! //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. 
-#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.6")] #![warn(missing_docs, rust_2018_idioms, clippy::all)] use byteorder::{BigEndian, ByteOrder}; diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 35cdd6e7b..686d0036d 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -30,7 +30,7 @@ with-time-0_3 = ["time-03"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -postgres-protocol = { version = "0.6.4", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.5", path = "../postgres-protocol" } postgres-derive = { version = "0.4.2", optional = true, path = "../postgres-derive" } array-init = { version = "2", optional = true } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 29cf26829..f9f49da3e 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -54,7 +54,7 @@ parking_lot = "0.12" percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.11" -postgres-protocol = { version = "0.6.5", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.6", path = "../postgres-protocol" } postgres-types = { version = "0.2.4", path = "../postgres-types" } tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } From 43e15690f492f3ae8088677fd8d5df18f73b3e85 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Aug 2023 20:11:35 -0400 Subject: [PATCH 700/819] Release postgres-derive v0.4.5 --- postgres-derive/CHANGELOG.md | 7 +++++++ postgres-derive/Cargo.toml | 2 +- postgres-types/Cargo.toml | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md index 22714acc2..b0075fa8e 100644 --- a/postgres-derive/CHANGELOG.md +++ b/postgres-derive/CHANGELOG.md @@ -1,5 +1,12 @@ # Change Log +## v0.4.5 - 2023-08-19 + +### Added + +* Added a `rename_all` option for enum and struct derives. +* Added an `allow_mismatch` option to disable strict enum variant checks against the Postgres type. 
+ ## v0.4.4 - 2023-03-27 ### Changed diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 78bec3d41..51ebb5663 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-derive" -version = "0.4.4" +version = "0.4.5" authors = ["Steven Fackler "] license = "MIT/Apache-2.0" edition = "2018" diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 686d0036d..15de00702 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -31,7 +31,7 @@ with-time-0_3 = ["time-03"] bytes = "1.0" fallible-iterator = "0.2" postgres-protocol = { version = "0.6.5", path = "../postgres-protocol" } -postgres-derive = { version = "0.4.2", optional = true, path = "../postgres-derive" } +postgres-derive = { version = "0.4.5", optional = true, path = "../postgres-derive" } array-init = { version = "2", optional = true } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } From 6f7ab44d5bc8548a4e7fb69d46d3b85a14101144 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Aug 2023 20:14:01 -0400 Subject: [PATCH 701/819] Release postgres-types v0.2.6 --- postgres-types/CHANGELOG.md | 15 +++++++++++++-- postgres-types/Cargo.toml | 2 +- postgres-types/src/lib.rs | 1 - 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 0f42f3495..72a1cbb6a 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,14 +1,25 @@ # Change Log +## v0.2.6 - 2023-08-19 + +### Fixed + +* Fixed serialization to `OIDVECTOR` and `INT2VECTOR`. + +### Added + +* Removed the `'static` requirement for the `impl BorrowToSql for Box`. +* Added a `ToSql` implementation for `Cow<[u8]>`. + ## v0.2.5 - 2023-03-27 -## Added +### Added * Added support for multi-range types. ## v0.2.4 - 2022-08-20 -## Added +### Added * Added `ToSql` and `FromSql` implementations for `Box<[T]>`. * Added `ToSql` and `FromSql` implementations for `[u8; N]` via the `array-impls` feature. diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 15de00702..193d159a1 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.2.5" +version = "0.2.6" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index d27adfe0e..52b5c773a 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -174,7 +174,6 @@ //! Meh, //! } //! ``` -#![doc(html_root_url = "https://docs.rs/postgres-types/0.2")] #![warn(clippy::all, rust_2018_idioms, missing_docs)] use fallible_iterator::FallibleIterator; use postgres_protocol::types::{self, ArrayDimension}; From 3d0a593ea610fb51b25a34087131470c94e3fe58 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Aug 2023 20:20:13 -0400 Subject: [PATCH 702/819] Release tokio-postgres v0.7.9 --- tokio-postgres/CHANGELOG.md | 13 +++++++++++++ tokio-postgres/Cargo.toml | 4 ++-- tokio-postgres/src/lib.rs | 1 - 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 3345a1d43..41a1a65d1 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,18 @@ # Change Log +## v0.7.9 + +## Fixed + +* Fixed builds on OpenBSD. + +## Added + +* Added the `js` feature for WASM support. +* Added support for the `hostaddr` config option to bypass DNS lookups. 
+* Added support for the `load_balance_hosts` config option to randomize connection ordering. +* The `user` config option now defaults to the executing process's user. + ## v0.7.8 ## Added diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f9f49da3e..3b33cc8f6 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.8" +version = "0.7.9" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -55,7 +55,7 @@ percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.11" postgres-protocol = { version = "0.6.6", path = "../postgres-protocol" } -postgres-types = { version = "0.2.4", path = "../postgres-types" } +postgres-types = { version = "0.2.5", path = "../postgres-types" } tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } rand = "0.8.5" diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 2bb410187..ff8e93ddc 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -116,7 +116,6 @@ //! | `with-uuid-1` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 1.0 | no | //! | `with-time-0_2` | Enable support for the 0.2 version of the `time` crate. | [time](https://crates.io/crates/time/0.2.0) 0.2 | no | //! | `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. | [time](https://crates.io/crates/time/0.3.0) 0.3 | no | -#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.7")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] pub use crate::cancel_token::CancelToken; From e08a38f9f6f06a67d699209d54097fa8a567a578 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Aug 2023 20:33:21 -0400 Subject: [PATCH 703/819] sync postgres config up with tokio-postgres --- postgres/src/config.rs | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 1839c9cb3..0f936fdc4 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -13,7 +13,9 @@ use std::sync::Arc; use std::time::Duration; use tokio::runtime; #[doc(inline)] -pub use tokio_postgres::config::{ChannelBinding, Host, SslMode, TargetSessionAttrs}; +pub use tokio_postgres::config::{ + ChannelBinding, Host, LoadBalanceHosts, SslMode, TargetSessionAttrs, +}; use tokio_postgres::error::DbError; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; @@ -43,9 +45,9 @@ use tokio_postgres::{Error, Socket}; /// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, /// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. /// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, -/// - or if host specifies an IP address, that value will be used directly. +/// or if host specifies an IP address, that value will be used directly. /// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications -/// with time constraints. However, a host name is required for verify-full SSL certificate verification. +/// with time constraints. However, a host name is required for TLS certificate verification. /// Specifically: /// * If `hostaddr` is specified without `host`, the value for `hostaddr` gives the server network address. 
/// The connection attempt will fail if the authentication method requires a host name; @@ -72,6 +74,15 @@ use tokio_postgres::{Error, Socket}; /// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that /// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server /// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. +/// * `channel_binding` - Controls usage of channel binding in the authentication process. If set to `disable`, channel +/// binding will not be used. If set to `prefer`, channel binding will be used if available, but not used otherwise. +/// If set to `require`, the authentication process will fail if channel binding is not used. Defaults to `prefer`. +/// * `load_balance_hosts` - Controls the order in which the client tries to connect to the available hosts and +/// addresses. Once a connection attempt is successful no other hosts and addresses will be tried. This parameter +/// is typically used in combination with multiple host names or a DNS record that returns multiple IPs. If set to +/// `disable`, hosts and addresses will be tried in the order provided. If set to `random`, hosts will be tried +/// in a random order, and the IP addresses resolved from a hostname will also be tried in a random order. Defaults +/// to `disable`. /// /// ## Examples /// @@ -80,7 +91,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// host=/var/run/postgresql,localhost port=1234 user=postgres password='password with spaces' +/// host=/var/lib/postgresql,localhost port=1234 user=postgres password='password with spaces' /// ``` /// /// ```not_rust @@ -94,7 +105,7 @@ use tokio_postgres::{Error, Socket}; /// # Url /// /// This format resembles a URL with a scheme of either `postgres://` or `postgresql://`. All components are optional, -/// and the format accept query parameters for all of the key-value pairs described in the section above. Multiple +/// and the format accepts query parameters for all of the key-value pairs described in the section above. Multiple /// host/port pairs can be comma-separated. Unix socket paths in the host section of the URL should be percent-encoded, /// as the path component of the URL specifies the database name. /// @@ -105,7 +116,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// postgresql://user:password@%2Fvar%2Frun%2Fpostgresql/mydb?connect_timeout=10 +/// postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10 /// ``` /// /// ```not_rust @@ -113,7 +124,7 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust -/// postgresql:///mydb?user=user&host=/var/run/postgresql +/// postgresql:///mydb?user=user&host=/var/lib/postgresql /// ``` #[derive(Clone)] pub struct Config { @@ -396,6 +407,19 @@ impl Config { self.config.get_channel_binding() } + /// Sets the host load balancing behavior. + /// + /// Defaults to `disable`. + pub fn load_balance_hosts(&mut self, load_balance_hosts: LoadBalanceHosts) -> &mut Config { + self.config.load_balance_hosts(load_balance_hosts); + self + } + + /// Gets the host load balancing behavior. + pub fn get_load_balance_hosts(&self) -> LoadBalanceHosts { + self.config.get_load_balance_hosts() + } + /// Sets the notice callback. 
/// /// This callback will be invoked with the contents of every From f45527fe5f4f566328973097511a33d771d3f300 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Aug 2023 20:34:02 -0400 Subject: [PATCH 704/819] remove bogus docs --- postgres/src/config.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index 0f936fdc4..f83244b2e 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -1,6 +1,4 @@ //! Connection configuration. -//! -//! Requires the `runtime` Cargo feature (enabled by default). use crate::connection::Connection; use crate::Client; From 75cc986d8c40024eca45139edc6c366231d147ea Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 19 Aug 2023 20:37:16 -0400 Subject: [PATCH 705/819] Release postgres v0.19.6 --- postgres/CHANGELOG.md | 14 +++++++++++--- postgres/Cargo.toml | 8 +++----- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index b8263a04a..fe9e8dbe8 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,20 +1,28 @@ # Change Log +## v0.19.6 - 2023-08-19 + +### Added + +* Added support for the `hostaddr` config option to bypass DNS lookups. +* Added support for the `load_balance_hosts` config option to randomize connection ordering. +* The `user` config option now defaults to the executing process's user. + ## v0.19.5 - 2023-03-27 -## Added +### Added * Added `keepalives_interval` and `keepalives_retries` config options. * Added the `tcp_user_timeout` config option. * Added `RowIter::rows_affected`. -## Changed +### Changed * Passing an incorrect number of parameters to a query method now returns an error instead of panicking. ## v0.19.4 - 2022-08-21 -## Added +### Added * Added `ToSql` and `FromSql` implementations for `[u8; N]` via the `array-impls` feature. * Added support for `smol_str` 0.1 via the `with-smol_str-01` feature. diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 044bb91e1..ff626f86c 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.19.5" +version = "0.19.6" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -39,11 +39,9 @@ with-time-0_3 = ["tokio-postgres/with-time-0_3"] bytes = "1.0" fallible-iterator = "0.2" futures-util = { version = "0.3.14", features = ["sink"] } -tokio-postgres = { version = "0.7.8", path = "../tokio-postgres" } - -tokio = { version = "1.0", features = ["rt", "time"] } log = "0.4" +tokio-postgres = { version = "0.7.9", path = "../tokio-postgres" } +tokio = { version = "1.0", features = ["rt", "time"] } [dev-dependencies] criterion = "0.5" -tokio = { version = "1.0", features = ["rt-multi-thread"] } From cb609be758f3fb5af537f04b584a2ee0cebd5e79 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Aug 2023 13:31:22 -0400 Subject: [PATCH 706/819] Defer username default --- postgres/src/config.rs | 8 ++------ tokio-postgres/src/config.rs | 16 ++++++---------- tokio-postgres/src/connect_raw.rs | 21 +++++++++++++++------ 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/postgres/src/config.rs b/postgres/src/config.rs index f83244b2e..a32ddc78e 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -158,12 +158,8 @@ impl Config { self } - /// Gets the user to authenticate with. - /// - /// If no user has been configured with the [`user`](Config::user) method, - /// then this defaults to the user executing this process. It always - /// returns `Some`. 
- // FIXME remove option + /// Gets the user to authenticate with, if one has been configured with + /// the `user` method. pub fn get_user(&self) -> Option<&str> { self.config.get_user() } diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index 0da5fc689..b178eac80 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -190,7 +190,7 @@ pub enum Host { /// ``` #[derive(Clone, PartialEq, Eq)] pub struct Config { - pub(crate) user: String, + pub(crate) user: Option, pub(crate) password: Option>, pub(crate) dbname: Option, pub(crate) options: Option, @@ -219,7 +219,7 @@ impl Config { /// Creates a new configuration. pub fn new() -> Config { Config { - user: whoami::username(), + user: None, password: None, dbname: None, options: None, @@ -247,18 +247,14 @@ impl Config { /// /// Defaults to the user executing this process. pub fn user(&mut self, user: &str) -> &mut Config { - self.user = user.to_string(); + self.user = Some(user.to_string()); self } - /// Gets the user to authenticate with. - /// - /// If no user has been configured with the [`user`](Config::user) method, - /// then this defaults to the user executing this process. It always - /// returns `Some`. - // FIXME remove option + /// Gets the user to authenticate with, if one has been configured with + /// the `user` method. pub fn get_user(&self) -> Option<&str> { - Some(&self.user) + self.user.as_deref() } /// Sets the password to authenticate with. diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index 11cc48ef8..f19bb50c4 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -96,8 +96,10 @@ where delayed: VecDeque::new(), }; - startup(&mut stream, config).await?; - authenticate(&mut stream, config).await?; + let user = config.user.clone().unwrap_or_else(whoami::username); + + startup(&mut stream, config, &user).await?; + authenticate(&mut stream, config, &user).await?; let (process_id, secret_key, parameters) = read_info(&mut stream).await?; let (sender, receiver) = mpsc::unbounded(); @@ -107,13 +109,17 @@ where Ok((client, connection)) } -async fn startup(stream: &mut StartupStream, config: &Config) -> Result<(), Error> +async fn startup( + stream: &mut StartupStream, + config: &Config, + user: &str, +) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin, { let mut params = vec![("client_encoding", "UTF8")]; - params.push(("user", &config.user)); + params.push(("user", user)); if let Some(dbname) = &config.dbname { params.push(("database", &**dbname)); } @@ -133,7 +139,11 @@ where .map_err(Error::io) } -async fn authenticate(stream: &mut StartupStream, config: &Config) -> Result<(), Error> +async fn authenticate( + stream: &mut StartupStream, + config: &Config, + user: &str, +) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsStream + Unpin, @@ -156,7 +166,6 @@ where Some(Message::AuthenticationMd5Password(body)) => { can_skip_channel_binding(config)?; - let user = &config.user; let pass = config .password .as_ref() From b411e5c3cb71d43fc9249b5d3ca38a7213470069 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Aug 2023 13:35:48 -0400 Subject: [PATCH 707/819] clippy --- postgres-protocol/src/types/test.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/postgres-protocol/src/types/test.rs b/postgres-protocol/src/types/test.rs index 6f1851fc2..3e33b08f0 100644 --- a/postgres-protocol/src/types/test.rs +++ 
b/postgres-protocol/src/types/test.rs @@ -174,7 +174,7 @@ fn ltree_str() { let mut query = vec![1u8]; query.extend_from_slice("A.B.C".as_bytes()); - assert!(matches!(ltree_from_sql(query.as_slice()), Ok(_))) + assert!(ltree_from_sql(query.as_slice()).is_ok()) } #[test] @@ -182,7 +182,7 @@ fn ltree_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("A.B.C".as_bytes()); - assert!(matches!(ltree_from_sql(query.as_slice()), Err(_))) + assert!(ltree_from_sql(query.as_slice()).is_err()) } #[test] @@ -202,7 +202,7 @@ fn lquery_str() { let mut query = vec![1u8]; query.extend_from_slice("A.B.C".as_bytes()); - assert!(matches!(lquery_from_sql(query.as_slice()), Ok(_))) + assert!(lquery_from_sql(query.as_slice()).is_ok()) } #[test] @@ -210,7 +210,7 @@ fn lquery_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("A.B.C".as_bytes()); - assert!(matches!(lquery_from_sql(query.as_slice()), Err(_))) + assert!(lquery_from_sql(query.as_slice()).is_err()) } #[test] @@ -230,7 +230,7 @@ fn ltxtquery_str() { let mut query = vec![1u8]; query.extend_from_slice("a & b*".as_bytes()); - assert!(matches!(ltree_from_sql(query.as_slice()), Ok(_))) + assert!(ltree_from_sql(query.as_slice()).is_ok()) } #[test] @@ -238,5 +238,5 @@ fn ltxtquery_wrong_version() { let mut query = vec![2u8]; query.extend_from_slice("a & b*".as_bytes()); - assert!(matches!(ltree_from_sql(query.as_slice()), Err(_))) + assert!(ltree_from_sql(query.as_slice()).is_err()) } From 016e9a3b8557c267f650090e1501d5efd00de908 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Aug 2023 13:40:01 -0400 Subject: [PATCH 708/819] avoid a silly clone --- tokio-postgres/src/connect_raw.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index f19bb50c4..19be9eb01 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -13,6 +13,7 @@ use postgres_protocol::authentication::sasl; use postgres_protocol::authentication::sasl::ScramSha256; use postgres_protocol::message::backend::{AuthenticationSaslBody, Message}; use postgres_protocol::message::frontend; +use std::borrow::Cow; use std::collections::{HashMap, VecDeque}; use std::io; use std::pin::Pin; @@ -96,7 +97,10 @@ where delayed: VecDeque::new(), }; - let user = config.user.clone().unwrap_or_else(whoami::username); + let user = config + .user + .as_deref() + .map_or_else(|| Cow::Owned(whoami::username()), Cow::Borrowed); startup(&mut stream, config, &user).await?; authenticate(&mut stream, config, &user).await?; From 234e20bb000ccf17d08341bd66e48d1105c3960a Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Aug 2023 13:40:40 -0400 Subject: [PATCH 709/819] bump ci version --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9a669a40f..008158fb0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,7 +82,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.67.0 + version: 1.70.0 - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - uses: actions/cache@v3 From c50fcbd9fb6f0df53d2300fb429af1c6c128007f Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Aug 2023 13:45:34 -0400 Subject: [PATCH 710/819] Release tokio-postgres v0.7.10 --- tokio-postgres/CHANGELOG.md | 6 ++++++ tokio-postgres/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 
deletion(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 41a1a65d1..2bee9a1c4 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.7.10 + +## Fixed + +* Defered default username lookup to avoid regressing `Config` behavior. + ## v0.7.9 ## Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 3b33cc8f6..ec5e3cbec 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.9" +version = "0.7.10" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" From c5ff8cfd86e897b7c197f52684a37a4f17cecb75 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Fri, 25 Aug 2023 13:48:08 -0400 Subject: [PATCH 711/819] Release postgres v0.19.7 --- postgres/CHANGELOG.md | 6 ++++++ postgres/Cargo.toml | 4 ++-- tokio-postgres/CHANGELOG.md | 6 +++--- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index fe9e8dbe8..7f856b5ac 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.19.7 - 2023-08-25 + +## Fixed + +* Defered default username lookup to avoid regressing `Config` behavior. + ## v0.19.6 - 2023-08-19 ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index ff626f86c..18406da9f 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.19.6" +version = "0.19.7" authors = ["Steven Fackler "] edition = "2018" license = "MIT/Apache-2.0" @@ -40,7 +40,7 @@ bytes = "1.0" fallible-iterator = "0.2" futures-util = { version = "0.3.14", features = ["sink"] } log = "0.4" -tokio-postgres = { version = "0.7.9", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.10", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } [dev-dependencies] diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 2bee9a1c4..75448d130 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,12 +1,12 @@ # Change Log -## v0.7.10 +## v0.7.10 - 2023-08-25 ## Fixed * Defered default username lookup to avoid regressing `Config` behavior. -## v0.7.9 +## v0.7.9 - 2023-08-19 ## Fixed @@ -19,7 +19,7 @@ * Added support for the `load_balance_hosts` config option to randomize connection ordering. * The `user` config option now defaults to the executing process's user. 
-## v0.7.8 +## v0.7.8 - 2023-05-27 ## Added From b1306a4a74317ac142ae9b93445360e9597380ec Mon Sep 17 00:00:00 2001 From: ds-cbo <82801887+ds-cbo@users.noreply.github.com> Date: Fri, 20 Oct 2023 16:31:41 +0200 Subject: [PATCH 712/819] remove rustc-serialize dependency --- postgres-types/Cargo.toml | 4 +++- postgres/src/lib.rs | 2 +- tokio-postgres/CHANGELOG.md | 6 ++++++ tokio-postgres/Cargo.toml | 3 +-- tokio-postgres/src/lib.rs | 2 +- tokio-postgres/tests/test/types/eui48_04.rs | 18 ------------------ tokio-postgres/tests/test/types/mod.rs | 2 -- 7 files changed, 12 insertions(+), 25 deletions(-) delete mode 100644 tokio-postgres/tests/test/types/eui48_04.rs diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 193d159a1..cfd083637 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -39,8 +39,10 @@ chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, "clock", ], optional = true } cidr-02 = { version = "0.2", package = "cidr", optional = true } +# eui48-04 will stop compiling and support will be removed +# See https://github.com/sfackler/rust-postgres/issues/1073 eui48-04 = { version = "0.4", package = "eui48", optional = true } -eui48-1 = { version = "1.0", package = "eui48", optional = true } +eui48-1 = { version = "1.0", package = "eui48", optional = true, default-features = false } geo-types-06 = { version = "0.6", package = "geo-types", optional = true } geo-types-0_7 = { version = "0.7", package = "geo-types", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index fbe85cbde..ddf1609ad 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -55,7 +55,7 @@ //! | ------- | ----------- | ------------------ | ------- | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | -//! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. This is deprecated and will be removed. | [eui48](https://crates.io/crates/eui48) 0.4 | no | //! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | //! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. 
| [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 75448d130..bd076eef9 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## Unreleased + +* Disable `rustc-serialize` compatibility of `eui48-1` dependency +* Remove tests for `eui48-04` + + ## v0.7.10 - 2023-08-25 ## Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index ec5e3cbec..bb58eb2d9 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -78,8 +78,7 @@ tokio = { version = "1.0", features = [ bit-vec-06 = { version = "0.6", package = "bit-vec" } chrono-04 = { version = "0.4", package = "chrono", default-features = false } -eui48-04 = { version = "0.4", package = "eui48" } -eui48-1 = { version = "1.0", package = "eui48" } +eui48-1 = { version = "1.0", package = "eui48", default-features = false } geo-types-06 = { version = "0.6", package = "geo-types" } geo-types-07 = { version = "0.7", package = "geo-types" } serde-1 = { version = "1.0", package = "serde" } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index ff8e93ddc..2973d33b0 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -107,7 +107,7 @@ //! | `array-impls` | Enables `ToSql` and `FromSql` trait impls for arrays | - | no | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. | [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | -//! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. This is deprecated and will be removed. | [eui48](https://crates.io/crates/eui48) 0.4 | no | //! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | //! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. 
| [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | diff --git a/tokio-postgres/tests/test/types/eui48_04.rs b/tokio-postgres/tests/test/types/eui48_04.rs deleted file mode 100644 index 074faa37e..000000000 --- a/tokio-postgres/tests/test/types/eui48_04.rs +++ /dev/null @@ -1,18 +0,0 @@ -use eui48_04::MacAddress; - -use crate::types::test_type; - -#[tokio::test] -async fn test_eui48_params() { - test_type( - "MACADDR", - &[ - ( - Some(MacAddress::parse_str("12-34-56-AB-CD-EF").unwrap()), - "'12-34-56-ab-cd-ef'", - ), - (None, "NULL"), - ], - ) - .await -} diff --git a/tokio-postgres/tests/test/types/mod.rs b/tokio-postgres/tests/test/types/mod.rs index f1a44da08..62d54372a 100644 --- a/tokio-postgres/tests/test/types/mod.rs +++ b/tokio-postgres/tests/test/types/mod.rs @@ -17,8 +17,6 @@ use bytes::BytesMut; mod bit_vec_06; #[cfg(feature = "with-chrono-0_4")] mod chrono_04; -#[cfg(feature = "with-eui48-0_4")] -mod eui48_04; #[cfg(feature = "with-eui48-1")] mod eui48_1; #[cfg(feature = "with-geo-types-0_6")] From ea9e0e5cddc2e772179027e635afa11d64feea2b Mon Sep 17 00:00:00 2001 From: ds-cbo <82801887+ds-cbo@users.noreply.github.com> Date: Mon, 30 Oct 2023 10:43:56 +0100 Subject: [PATCH 713/819] replace deprecated chrono::DateTime::from_utc --- postgres-types/src/chrono_04.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-types/src/chrono_04.rs b/postgres-types/src/chrono_04.rs index 0ec92437d..6011b549e 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -40,7 +40,7 @@ impl ToSql for NaiveDateTime { impl<'a> FromSql<'a> for DateTime { fn from_sql(type_: &Type, raw: &[u8]) -> Result, Box> { let naive = NaiveDateTime::from_sql(type_, raw)?; - Ok(DateTime::from_utc(naive, Utc)) + Ok(Utc::from_utc_datetime(naive)) } accepts!(TIMESTAMPTZ); From b4ebc4e7ec6ee52930bc22e2ad29b66687852623 Mon Sep 17 00:00:00 2001 From: ds-cbo <82801887+ds-cbo@users.noreply.github.com> Date: Mon, 30 Oct 2023 16:39:50 +0100 Subject: [PATCH 714/819] add missing import --- postgres-types/src/chrono_04.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/postgres-types/src/chrono_04.rs b/postgres-types/src/chrono_04.rs index 6011b549e..f995d483c 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -1,5 +1,7 @@ use bytes::BytesMut; -use chrono_04::{DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc}; +use chrono_04::{ + DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc, +}; use postgres_protocol::types; use std::error::Error; From 19a6ef767bf6b2070ffe9efd43af514b6a31f2d2 Mon Sep 17 00:00:00 2001 From: ds-cbo <82801887+ds-cbo@users.noreply.github.com> Date: Tue, 31 Oct 2023 09:54:13 +0100 Subject: [PATCH 715/819] fix more deprecated chrono functions --- postgres-types/src/chrono_04.rs | 2 +- tokio-postgres/tests/test/types/chrono_04.rs | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/postgres-types/src/chrono_04.rs b/postgres-types/src/chrono_04.rs index f995d483c..d599bde02 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -42,7 +42,7 @@ impl ToSql for NaiveDateTime { impl<'a> FromSql<'a> for DateTime { fn from_sql(type_: &Type, raw: &[u8]) -> Result, Box> { let naive = NaiveDateTime::from_sql(type_, raw)?; - Ok(Utc::from_utc_datetime(naive)) + Ok(Utc.from_utc_datetime(&naive)) } accepts!(TIMESTAMPTZ); diff --git a/tokio-postgres/tests/test/types/chrono_04.rs 
b/tokio-postgres/tests/test/types/chrono_04.rs index a8e9e5afa..b010055ba 100644 --- a/tokio-postgres/tests/test/types/chrono_04.rs +++ b/tokio-postgres/tests/test/types/chrono_04.rs @@ -53,10 +53,9 @@ async fn test_with_special_naive_date_time_params() { async fn test_date_time_params() { fn make_check(time: &str) -> (Option>, &str) { ( - Some( - Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") - .unwrap(), - ), + Some(Utc.from_utc_datetime( + &NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap(), + )), time, ) } @@ -76,10 +75,9 @@ async fn test_date_time_params() { async fn test_with_special_date_time_params() { fn make_check(time: &str) -> (Timestamp>, &str) { ( - Timestamp::Value( - Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") - .unwrap(), - ), + Timestamp::Value(Utc.from_utc_datetime( + &NaiveDateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'").unwrap(), + )), time, ) } From 863c1d6039e8fe114e48d62c0451d6eb5e4867a2 Mon Sep 17 00:00:00 2001 From: James Guthrie Date: Tue, 7 Nov 2023 22:09:39 +0100 Subject: [PATCH 716/819] fix code block --- postgres-types/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 52b5c773a..aaf145e6b 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -168,6 +168,8 @@ //! 'Happy' //! ); //! ``` +//! +//! ```rust //! #[postgres(allow_mismatch)] //! enum Mood { //! Happy, From 10edbcb46c44933417e8d2e7a1c1d63c4119beb3 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Tue, 7 Nov 2023 16:23:06 -0500 Subject: [PATCH 717/819] Update lib.rs --- postgres-types/src/lib.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index aaf145e6b..2f02f6e5f 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -170,6 +170,11 @@ //! ``` //! //! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] //! #[postgres(allow_mismatch)] //! enum Mood { //! Happy, From 02bab67280f8a850b816754b29eb0364708604ec Mon Sep 17 00:00:00 2001 From: "Michael P. Jung" Date: Tue, 5 Dec 2023 13:54:20 +0100 Subject: [PATCH 718/819] Add table_oid and field_id to columns of prepared statements --- tokio-postgres/CHANGELOG.md | 2 +- tokio-postgres/src/prepare.rs | 8 +++++++- tokio-postgres/src/statement.rs | 23 +++++++++++++++++------ 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index bd076eef9..9f5eb9521 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -4,7 +4,7 @@ * Disable `rustc-serialize` compatibility of `eui48-1` dependency * Remove tests for `eui48-04` - +* Add `table_oid` and `field_id` fields to `Columns` struct of prepared statements. ## v0.7.10 - 2023-08-25 diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index e3f09a7c2..1ab34e2df 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -12,6 +12,7 @@ use log::debug; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::future::Future; +use std::num::{NonZeroI16, NonZeroU32}; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -95,7 +96,12 @@ pub async fn prepare( let mut it = row_description.fields(); while let Some(field) = it.next().map_err(Error::parse)? 
{ let type_ = get_type(client, field.type_oid()).await?; - let column = Column::new(field.name().to_string(), type_); + let column = Column { + name: field.name().to_string(), + table_oid: NonZeroU32::new(field.table_oid()), + column_id: NonZeroI16::new(field.column_id()), + type_, + }; columns.push(column); } } diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index 97561a8e4..73d56c220 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -5,6 +5,7 @@ use crate::types::Type; use postgres_protocol::message::frontend; use std::{ fmt, + num::{NonZeroI16, NonZeroU32}, sync::{Arc, Weak}, }; @@ -66,20 +67,28 @@ impl Statement { /// Information about a column of a query. pub struct Column { - name: String, - type_: Type, + pub(crate) name: String, + pub(crate) table_oid: Option, + pub(crate) column_id: Option, + pub(crate) type_: Type, } impl Column { - pub(crate) fn new(name: String, type_: Type) -> Column { - Column { name, type_ } - } - /// Returns the name of the column. pub fn name(&self) -> &str { &self.name } + /// Returns the OID of the underlying database table. + pub fn table_oid(&self) -> Option { + self.table_oid + } + + /// Return the column ID within the underlying database table. + pub fn column_id(&self) -> Option { + self.column_id + } + /// Returns the type of the column. pub fn type_(&self) -> &Type { &self.type_ @@ -90,6 +99,8 @@ impl fmt::Debug for Column { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Column") .field("name", &self.name) + .field("table_oid", &self.table_oid) + .field("column_id", &self.column_id) .field("type", &self.type_) .finish() } From 87876150d79e637767247176e339bf01a8b32d3b Mon Sep 17 00:00:00 2001 From: "Michael P. Jung" Date: Tue, 5 Dec 2023 14:09:44 +0100 Subject: [PATCH 719/819] Simplify Debug impl of Column --- tokio-postgres/src/prepare.rs | 2 +- tokio-postgres/src/statement.rs | 17 +++-------------- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 1ab34e2df..0302cdb4c 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -100,7 +100,7 @@ pub async fn prepare( name: field.name().to_string(), table_oid: NonZeroU32::new(field.table_oid()), column_id: NonZeroI16::new(field.column_id()), - type_, + r#type: type_, }; columns.push(column); } diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index 73d56c220..fe3b6b7a1 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -4,7 +4,6 @@ use crate::connection::RequestMessages; use crate::types::Type; use postgres_protocol::message::frontend; use std::{ - fmt, num::{NonZeroI16, NonZeroU32}, sync::{Arc, Weak}, }; @@ -66,11 +65,12 @@ impl Statement { } /// Information about a column of a query. +#[derive(Debug)] pub struct Column { pub(crate) name: String, pub(crate) table_oid: Option, pub(crate) column_id: Option, - pub(crate) type_: Type, + pub(crate) r#type: Type, } impl Column { @@ -91,17 +91,6 @@ impl Column { /// Returns the type of the column. 
pub fn type_(&self) -> &Type { - &self.type_ - } -} - -impl fmt::Debug for Column { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Column") - .field("name", &self.name) - .field("table_oid", &self.table_oid) - .field("column_id", &self.column_id) - .field("type", &self.type_) - .finish() + &self.r#type } } From bbc04145de7a83dfa66cb3cf4a68878da2c1cc32 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 11 Dec 2023 19:06:22 -0500 Subject: [PATCH 720/819] Update id types --- tokio-postgres/src/prepare.rs | 5 ++--- tokio-postgres/src/statement.rs | 13 +++++-------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 0302cdb4c..07fb45694 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -12,7 +12,6 @@ use log::debug; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; use std::future::Future; -use std::num::{NonZeroI16, NonZeroU32}; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -98,8 +97,8 @@ pub async fn prepare( let type_ = get_type(client, field.type_oid()).await?; let column = Column { name: field.name().to_string(), - table_oid: NonZeroU32::new(field.table_oid()), - column_id: NonZeroI16::new(field.column_id()), + table_oid: Some(field.table_oid()).filter(|n| *n != 0), + column_id: Some(field.column_id()).filter(|n| *n != 0), r#type: type_, }; columns.push(column); diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index fe3b6b7a1..c5d657738 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -3,10 +3,7 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::Type; use postgres_protocol::message::frontend; -use std::{ - num::{NonZeroI16, NonZeroU32}, - sync::{Arc, Weak}, -}; +use std::sync::{Arc, Weak}; struct StatementInner { client: Weak, @@ -68,8 +65,8 @@ impl Statement { #[derive(Debug)] pub struct Column { pub(crate) name: String, - pub(crate) table_oid: Option, - pub(crate) column_id: Option, + pub(crate) table_oid: Option, + pub(crate) column_id: Option, pub(crate) r#type: Type, } @@ -80,12 +77,12 @@ impl Column { } /// Returns the OID of the underlying database table. - pub fn table_oid(&self) -> Option { + pub fn table_oid(&self) -> Option { self.table_oid } /// Return the column ID within the underlying database table. - pub fn column_id(&self) -> Option { + pub fn column_id(&self) -> Option { self.column_id } From 90c92c2ae8577a8e771333c701280485c45ad602 Mon Sep 17 00:00:00 2001 From: Troy Benson Date: Mon, 15 Jan 2024 16:33:27 +0000 Subject: [PATCH 721/819] feat(types): add default derive to json wrapper Adds a Default impl for `Json where T: Default` allowing for other structs to use the wrapper and implement Default. --- postgres-types/src/serde_json_1.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-types/src/serde_json_1.rs b/postgres-types/src/serde_json_1.rs index b98d561d1..715c33f98 100644 --- a/postgres-types/src/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -7,7 +7,7 @@ use std::fmt::Debug; use std::io::Read; /// A wrapper type to allow arbitrary `Serialize`/`Deserialize` types to convert to Postgres JSON values. 
-#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct Json(pub T); impl<'a, T> FromSql<'a> for Json From 2f150a7e50ee03cbccf52792b7e4507dbcef0301 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 13:49:51 +0000 Subject: [PATCH 722/819] Update env_logger requirement from 0.10 to 0.11 Updates the requirements on [env_logger](https://github.com/rust-cli/env_logger) to permit the latest version. - [Release notes](https://github.com/rust-cli/env_logger/releases) - [Changelog](https://github.com/rust-cli/env_logger/blob/main/CHANGELOG.md) - [Commits](https://github.com/rust-cli/env_logger/compare/v0.10.0...v0.11.0) --- updated-dependencies: - dependency-name: env_logger dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- tokio-postgres/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index bb58eb2d9..237f3d2f1 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -67,7 +67,7 @@ socket2 = { version = "0.5", features = ["all"] } [dev-dependencies] futures-executor = "0.3" criterion = "0.5" -env_logger = "0.10" +env_logger = "0.11" tokio = { version = "1.0", features = [ "macros", "net", From 7bc3deb989b3030681b742801bfeaca7f67e1e1e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 22 Jan 2024 20:49:47 -0500 Subject: [PATCH 723/819] Update ci.yml --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 008158fb0..0cc823d35 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,7 +82,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.70.0 + version: 1.71.0 - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - uses: actions/cache@v3 From a92c6eb2b65e12d7145a14cec23888d64c4b13e4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Mon, 22 Jan 2024 20:54:11 -0500 Subject: [PATCH 724/819] Update main.rs --- tokio-postgres/tests/test/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 0ab4a7bab..737f46631 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -303,6 +303,7 @@ async fn custom_range() { } #[tokio::test] +#[allow(clippy::get_first)] async fn simple_query() { let client = connect("user=postgres").await; From 289cf887600785e723628dcbc1f7a2267cd52917 Mon Sep 17 00:00:00 2001 From: Charles Samuels Date: Fri, 16 Feb 2024 10:55:08 -0800 Subject: [PATCH 725/819] add #[track_caller] to the Row::get() functions This small quality-of-life improvement changes these errors: thread '' panicked at /../.cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-postgres-0.7.10/src/row.rs:151:25: error retrieving column 0: error deserializing column 0: a Postgres value was `NULL` to: thread '' panicked at my-program.rs:100:25: error retrieving column 0: error deserializing column 0: a Postgres value was `NULL` --- tokio-postgres/src/row.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index db179b432..3c79de603 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -141,6 +141,7 @@ impl Row { /// # Panics /// /// Panics if the index is out of bounds or if the value cannot be converted 
to the specified type. + #[track_caller] pub fn get<'a, I, T>(&'a self, idx: I) -> T where I: RowIndex + fmt::Display, @@ -239,6 +240,7 @@ impl SimpleQueryRow { /// # Panics /// /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. + #[track_caller] pub fn get(&self, idx: I) -> Option<&str> where I: RowIndex + fmt::Display, From 25314a91c95dc8f75062e337eb363188c63df5d4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 17 Feb 2024 09:52:44 -0500 Subject: [PATCH 726/819] Bump CI version --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0cc823d35..641a42722 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,7 +82,7 @@ jobs: - run: docker compose up -d - uses: sfackler/actions/rustup@master with: - version: 1.71.0 + version: 1.74.0 - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT id: rust-version - uses: actions/cache@v3 From a9ca481c88fb619c6d35f2a6b64253bb46240c5d Mon Sep 17 00:00:00 2001 From: "chandr-andr (Kiselev Aleksandr)" Date: Sun, 3 Mar 2024 16:37:30 +0100 Subject: [PATCH 727/819] Added ReadOnly session attr --- tokio-postgres/src/config.rs | 3 +++ tokio-postgres/tests/test/parse.rs | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index b178eac80..c78346fff 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -34,6 +34,8 @@ pub enum TargetSessionAttrs { Any, /// The session must allow writes. ReadWrite, + /// The session allow only reads. + ReadOnly, } /// TLS configuration. @@ -622,6 +624,7 @@ impl Config { let target_session_attrs = match value { "any" => TargetSessionAttrs::Any, "read-write" => TargetSessionAttrs::ReadWrite, + "read-only" => TargetSessionAttrs::ReadOnly, _ => { return Err(Error::config_parse(Box::new(InvalidValue( "target_session_attrs", diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index 2c11899ca..04d422e27 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -34,6 +34,14 @@ fn settings() { .keepalives_idle(Duration::from_secs(30)) .target_session_attrs(TargetSessionAttrs::ReadWrite), ); + check( + "connect_timeout=3 keepalives=0 keepalives_idle=30 target_session_attrs=read-only", + Config::new() + .connect_timeout(Duration::from_secs(3)) + .keepalives(false) + .keepalives_idle(Duration::from_secs(30)) + .target_session_attrs(TargetSessionAttrs::ReadOnly), + ); } #[test] From 6a01730cbfed5d9c0aa694401704e6fe7ec0c8b5 Mon Sep 17 00:00:00 2001 From: "chandr-andr (Kiselev Aleksandr)" Date: Sun, 3 Mar 2024 19:17:50 +0100 Subject: [PATCH 728/819] Added ReadOnly session attr --- tokio-postgres/src/connect.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index ca57b9cdd..8189cb91c 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -160,7 +160,7 @@ where let has_hostname = hostname.is_some(); let (mut client, mut connection) = connect_raw(socket, tls, has_hostname, config).await?; - if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { + if config.target_session_attrs != TargetSessionAttrs::Any { let rows = client.simple_query_raw("SHOW transaction_read_only"); pin_mut!(rows); @@ -185,11 +185,21 @@ where match next.await.transpose()? 
{ Some(SimpleQueryMessage::Row(row)) => { - if row.try_get(0)? == Some("on") { + let read_only_result = row.try_get(0)?; + if read_only_result == Some("on") + && config.target_session_attrs == TargetSessionAttrs::ReadWrite + { return Err(Error::connect(io::Error::new( io::ErrorKind::PermissionDenied, "database does not allow writes", ))); + } else if read_only_result == Some("off") + && config.target_session_attrs == TargetSessionAttrs::ReadOnly + { + return Err(Error::connect(io::Error::new( + io::ErrorKind::PermissionDenied, + "database is not read only", + ))); } else { break; } From 4217553586c4ce390179a281834b8f2c3197863e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 13:45:26 +0000 Subject: [PATCH 729/819] Update base64 requirement from 0.21 to 0.22 Updates the requirements on [base64](https://github.com/marshallpierce/rust-base64) to permit the latest version. - [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md) - [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.21.0...v0.22.0) --- updated-dependencies: - dependency-name: base64 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- postgres-protocol/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index b44994811..bc83fc4e6 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -13,7 +13,7 @@ default = [] js = ["getrandom/js"] [dependencies] -base64 = "0.21" +base64 = "0.22" byteorder = "1.0" bytes = "1.0" fallible-iterator = "0.2" From 9d7c43c73955638624e75957a333fac5d9be1c02 Mon Sep 17 00:00:00 2001 From: novacrazy Date: Sun, 11 Feb 2024 03:03:19 -0600 Subject: [PATCH 730/819] Shrink query_opt/query_one codegen size very slightly --- tokio-postgres/src/client.rs | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 427a05049..d48a23a60 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -274,19 +274,9 @@ impl Client { where T: ?Sized + ToStatement, { - let stream = self.query_raw(statement, slice_iter(params)).await?; - pin_mut!(stream); - - let row = match stream.try_next().await? { - Some(row) => row, - None => return Err(Error::row_count()), - }; - - if stream.try_next().await?.is_some() { - return Err(Error::row_count()); - } - - Ok(row) + self.query_opt(statement, params) + .await + .and_then(|res| res.ok_or_else(Error::row_count)) } /// Executes a statements which returns zero or one rows, returning it. @@ -310,16 +300,22 @@ impl Client { let stream = self.query_raw(statement, slice_iter(params)).await?; pin_mut!(stream); - let row = match stream.try_next().await? { - Some(row) => row, - None => return Ok(None), - }; + let mut first = None; + + // Originally this was two calls to `try_next().await?`, + // once for the first element, and second to error if more than one. + // + // However, this new form with only one .await in a loop generates + // slightly smaller codegen/stack usage for the resulting future. + while let Some(row) = stream.try_next().await? { + if first.is_some() { + return Err(Error::row_count()); + } - if stream.try_next().await?.is_some() { - return Err(Error::row_count()); + first = Some(row); } - Ok(Some(row)) + Ok(first) } /// The maximally flexible version of [`query`]. 
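After this refactor `query_one` simply delegates to `query_opt`, so the two methods differ only in how an empty result is reported: `query_one` turns it into `Error::row_count()`, while `query_opt` returns `Ok(None)`; both still error when more than one row comes back. A minimal sketch of that caller-visible contract (the `people` table, `id` column, and `lookup_person` helper are illustrative assumptions, not part of the patch):

async fn lookup_person(client: &tokio_postgres::Client, id: i32) -> Result<(), tokio_postgres::Error> {
    // Exactly one row expected: zero rows (or more than one) yields Error::row_count().
    let row = client
        .query_one("SELECT name FROM people WHERE id = $1", &[&id])
        .await?;
    let name: &str = row.get(0);
    println!("query_one: {}", name);

    // Zero or one row expected: zero rows yields Ok(None); more than one row is still an error.
    if let Some(row) = client
        .query_opt("SELECT name FROM people WHERE id = $1", &[&id])
        .await?
    {
        let name: &str = row.get(0);
        println!("query_opt: {}", name);
    }

    Ok(())
}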
From 97436303232127dbd448d71a50c6365bdbee083c Mon Sep 17 00:00:00 2001 From: laxjesse Date: Wed, 13 Mar 2024 11:10:58 -0400 Subject: [PATCH 731/819] use `split_once` instead of `split` to parse lsn strings [`str::split`](https://doc.rust-lang.org/std/primitive.str.html#method.split) allocates a vector and generates considerably more instructions when compiled than [`str::split_once`](https://doc.rust-lang.org/std/primitive.str.html#method.split_once). [`u64::from_str_radix(split_lo, 16)`](https://doc.rust-lang.org/std/primitive.u64.html#method.from_str_radix) will error if the `lsn_str` contains more than one `/` so this change should result in the same behavior as the current implementation despite not explicitly checking this. --- postgres-types/CHANGELOG.md | 6 ++++++ postgres-types/src/pg_lsn.rs | 18 ++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 72a1cbb6a..157a2cc7d 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## Unreleased + +### Changed + +* `FromStr` implementation for `PgLsn` no longer allocates a `Vec` when splitting an lsn string on it's `/`. + ## v0.2.6 - 2023-08-19 ### Fixed diff --git a/postgres-types/src/pg_lsn.rs b/postgres-types/src/pg_lsn.rs index f0bbf4022..f339f9689 100644 --- a/postgres-types/src/pg_lsn.rs +++ b/postgres-types/src/pg_lsn.rs @@ -33,16 +33,14 @@ impl FromStr for PgLsn { type Err = ParseLsnError; fn from_str(lsn_str: &str) -> Result { - let split: Vec<&str> = lsn_str.split('/').collect(); - if split.len() == 2 { - let (hi, lo) = ( - u64::from_str_radix(split[0], 16).map_err(|_| ParseLsnError(()))?, - u64::from_str_radix(split[1], 16).map_err(|_| ParseLsnError(()))?, - ); - Ok(PgLsn((hi << 32) | lo)) - } else { - Err(ParseLsnError(())) - } + let Some((split_hi, split_lo)) = lsn_str.split_once('/') else { + return Err(ParseLsnError(())); + }; + let (hi, lo) = ( + u64::from_str_radix(split_hi, 16).map_err(|_| ParseLsnError(()))?, + u64::from_str_radix(split_lo, 16).map_err(|_| ParseLsnError(()))?, + ); + Ok(PgLsn((hi << 32) | lo)) } } From 3836a3052065bccf53001b832a21823204bfa137 Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Wed, 10 Apr 2024 17:42:13 +0200 Subject: [PATCH 732/819] Make license metadata SPDX compliant --- postgres-derive/Cargo.toml | 4 ++-- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres-protocol/Cargo.toml | 2 +- postgres-types/Cargo.toml | 2 +- postgres/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 51ebb5663..5d1604b24 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -2,7 +2,7 @@ name = "postgres-derive" version = "0.4.5" authors = ["Steven Fackler "] -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" edition = "2018" description = "An internal crate used by postgres-types" repository = "https://github.com/sfackler/rust-postgres" @@ -15,4 +15,4 @@ test = false syn = "2.0" proc-macro2 = "1.0" quote = "1.0" -heck = "0.4" \ No newline at end of file +heck = "0.4" diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 1f2f6385d..936eeeaa4 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -3,7 +3,7 @@ name = "postgres-native-tls" version = "0.5.0" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" 
+license = "MIT OR Apache-2.0" description = "TLS support for tokio-postgres via native-tls" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 8671308af..b7ebd3385 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -3,7 +3,7 @@ name = "postgres-openssl" version = "0.5.0" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "TLS support for tokio-postgres via openssl" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index bc83fc4e6..a8a130495 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.6" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index cfd083637..bf011251b 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -3,7 +3,7 @@ name = "postgres-types" version = "0.2.6" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "Conversions between Rust and Postgres values" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 18406da9f..2ff3c875e 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -3,7 +3,7 @@ name = "postgres" version = "0.19.7" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "A native, synchronous PostgreSQL client" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 237f3d2f1..b3e56314f 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -3,7 +3,7 @@ name = "tokio-postgres" version = "0.7.10" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "A native, asynchronous PostgreSQL client" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" From 670cd7d5802dfb3b0b6b1eadd480f5c9730bb0b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 16:21:21 +0000 Subject: [PATCH 733/819] Update heck requirement from 0.4 to 0.5 --- updated-dependencies: - dependency-name: heck dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- postgres-derive/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 5d1604b24..cbae6c77b 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -15,4 +15,4 @@ test = false syn = "2.0" proc-macro2 = "1.0" quote = "1.0" -heck = "0.4" +heck = "0.5" From 3c6dbe9b8c7bfad82c646f34092e3fa1d321b723 Mon Sep 17 00:00:00 2001 From: Yuri Astrakhan Date: Wed, 1 May 2024 22:46:06 -0400 Subject: [PATCH 734/819] Avoid extra clone in config if possible Using `impl Into` instead of `&str` in a fn arg allows both `&str` and `String` as parameters - thus if the caller already has a String object that it doesn't need, it can pass it in without extra cloning. The same might be done with the password, but may require closer look. --- tokio-postgres/src/config.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index c78346fff..62b45f793 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -248,8 +248,8 @@ impl Config { /// Sets the user to authenticate with. /// /// Defaults to the user executing this process. - pub fn user(&mut self, user: &str) -> &mut Config { - self.user = Some(user.to_string()); + pub fn user(&mut self, user: impl Into) -> &mut Config { + self.user = Some(user.into()); self } @@ -277,8 +277,8 @@ impl Config { /// Sets the name of the database to connect to. /// /// Defaults to the user. - pub fn dbname(&mut self, dbname: &str) -> &mut Config { - self.dbname = Some(dbname.to_string()); + pub fn dbname(&mut self, dbname: impl Into) -> &mut Config { + self.dbname = Some(dbname.into()); self } @@ -289,8 +289,8 @@ impl Config { } /// Sets command line options used to configure the server. - pub fn options(&mut self, options: &str) -> &mut Config { - self.options = Some(options.to_string()); + pub fn options(&mut self, options: impl Into) -> &mut Config { + self.options = Some(options.into()); self } @@ -301,8 +301,8 @@ impl Config { } /// Sets the value of the `application_name` runtime parameter. - pub fn application_name(&mut self, application_name: &str) -> &mut Config { - self.application_name = Some(application_name.to_string()); + pub fn application_name(&mut self, application_name: impl Into) -> &mut Config { + self.application_name = Some(application_name.into()); self } @@ -330,7 +330,9 @@ impl Config { /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. /// There must be either no hosts, or the same number of hosts as hostaddrs. 
- pub fn host(&mut self, host: &str) -> &mut Config { + pub fn host(&mut self, host: impl Into) -> &mut Config { + let host = host.into(); + #[cfg(unix)] { if host.starts_with('/') { @@ -338,7 +340,7 @@ impl Config { } } - self.host.push(Host::Tcp(host.to_string())); + self.host.push(Host::Tcp(host)); self } @@ -990,7 +992,7 @@ impl<'a> UrlParser<'a> { let mut it = creds.splitn(2, ':'); let user = self.decode(it.next().unwrap())?; - self.config.user(&user); + self.config.user(user); if let Some(password) = it.next() { let password = Cow::from(percent_encoding::percent_decode(password.as_bytes())); @@ -1053,7 +1055,7 @@ impl<'a> UrlParser<'a> { }; if !dbname.is_empty() { - self.config.dbname(&self.decode(dbname)?); + self.config.dbname(self.decode(dbname)?); } Ok(()) From d5d75d3a2f064425436c08b6a8f2da2b985aab3d Mon Sep 17 00:00:00 2001 From: vsuryamurthy Date: Thu, 23 May 2024 17:18:41 +0200 Subject: [PATCH 735/819] add simple_query to GenericClient in tokio_postgres --- tokio-postgres/CHANGELOG.md | 1 + tokio-postgres/src/generic_client.rs | 35 ++++++++++++++++++---------- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 9f5eb9521..775c22e34 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -5,6 +5,7 @@ * Disable `rustc-serialize` compatibility of `eui48-1` dependency * Remove tests for `eui48-04` * Add `table_oid` and `field_id` fields to `Columns` struct of prepared statements. +* Add `GenericClient::simple_query`. ## v0.7.10 - 2023-08-25 diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 50cff9712..d80dd3b86 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -1,6 +1,6 @@ use crate::query::RowStream; use crate::types::{BorrowToSql, ToSql, Type}; -use crate::{Client, Error, Row, Statement, ToStatement, Transaction}; +use crate::{Client, Error, Row, SimpleQueryMessage, Statement, ToStatement, Transaction}; use async_trait::async_trait; mod private { @@ -12,12 +12,12 @@ mod private { /// This trait is "sealed", and cannot be implemented outside of this crate. #[async_trait] pub trait GenericClient: private::Sealed { - /// Like `Client::execute`. + /// Like [`Client::execute`]. async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send; - /// Like `Client::execute_raw`. + /// Like [`Client::execute_raw`]. async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -25,12 +25,12 @@ pub trait GenericClient: private::Sealed { I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator; - /// Like `Client::query`. + /// Like [`Client::query`]. async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement + Sync + Send; - /// Like `Client::query_one`. + /// Like [`Client::query_one`]. async fn query_one( &self, statement: &T, @@ -39,7 +39,7 @@ pub trait GenericClient: private::Sealed { where T: ?Sized + ToStatement + Sync + Send; - /// Like `Client::query_opt`. + /// Like [`Client::query_opt`]. async fn query_opt( &self, statement: &T, @@ -48,7 +48,7 @@ pub trait GenericClient: private::Sealed { where T: ?Sized + ToStatement + Sync + Send; - /// Like `Client::query_raw`. + /// Like [`Client::query_raw`]. 
async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, @@ -56,23 +56,26 @@ pub trait GenericClient: private::Sealed { I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator; - /// Like `Client::prepare`. + /// Like [`Client::prepare`]. async fn prepare(&self, query: &str) -> Result; - /// Like `Client::prepare_typed`. + /// Like [`Client::prepare_typed`]. async fn prepare_typed( &self, query: &str, parameter_types: &[Type], ) -> Result; - /// Like `Client::transaction`. + /// Like [`Client::transaction`]. async fn transaction(&mut self) -> Result, Error>; - /// Like `Client::batch_execute`. + /// Like [`Client::batch_execute`]. async fn batch_execute(&self, query: &str) -> Result<(), Error>; - /// Returns a reference to the underlying `Client`. + /// Like [`Client::simple_query`]. + async fn simple_query(&self, query: &str) -> Result, Error>; + + /// Returns a reference to the underlying [`Client`]. fn client(&self) -> &Client; } @@ -156,6 +159,10 @@ impl GenericClient for Client { self.batch_execute(query).await } + async fn simple_query(&self, query: &str) -> Result, Error> { + self.simple_query(query).await + } + fn client(&self) -> &Client { self } @@ -243,6 +250,10 @@ impl GenericClient for Transaction<'_> { self.batch_execute(query).await } + async fn simple_query(&self, query: &str) -> Result, Error> { + self.simple_query(query).await + } + fn client(&self) -> &Client { self.client() } From fbecae11ace79376b20ae8b9a587ab577e8287cd Mon Sep 17 00:00:00 2001 From: Duarte Nunes Date: Mon, 11 Mar 2024 14:43:50 -0300 Subject: [PATCH 736/819] feat(types): add 'js' feature for wasm Enables the "js" feature of postgres-protocol. --- postgres-types/Cargo.toml | 1 + tokio-postgres/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index bf011251b..33296db2c 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -13,6 +13,7 @@ categories = ["database"] [features] derive = ["postgres-derive"] array-impls = ["array-init"] +js = ["postgres-protocol/js"] with-bit-vec-0_6 = ["bit-vec-06"] with-cidr-0_2 = ["cidr-02"] with-chrono-0_4 = ["chrono-04"] diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index b3e56314f..2e080cfb2 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -40,7 +40,7 @@ with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] with-uuid-1 = ["postgres-types/with-uuid-1"] with-time-0_2 = ["postgres-types/with-time-0_2"] with-time-0_3 = ["postgres-types/with-time-0_3"] -js = ["postgres-protocol/js"] +js = ["postgres-protocol/js", "postgres-types/js"] [dependencies] async-trait = "0.1" From 6cd4652bad6ac8474235c23d0e4e96cc4aa4d8db Mon Sep 17 00:00:00 2001 From: Dane Rigby Date: Tue, 28 May 2024 21:57:27 -0500 Subject: [PATCH 737/819] Add RowDescription to SimpleQueryMessage --- tokio-postgres/src/lib.rs | 6 ++++++ tokio-postgres/src/simple_query.rs | 5 +++-- tokio-postgres/tests/test/main.rs | 13 ++++++++++--- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 2973d33b0..d650f4db9 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -118,6 +118,10 @@ //! | `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. 
| [time](https://crates.io/crates/time/0.3.0) 0.3 | no | #![warn(rust_2018_idioms, clippy::all, missing_docs)] +use std::sync::Arc; + +use simple_query::SimpleColumn; + pub use crate::cancel_token::CancelToken; pub use crate::client::Client; pub use crate::config::Config; @@ -248,6 +252,8 @@ pub enum SimpleQueryMessage { /// /// The number of rows modified or selected is returned. CommandComplete(u64), + /// Column values of the proceeding row values + RowDescription(Arc<[SimpleColumn]>) } fn slice_iter<'a>( diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index bcc6d928b..4e0b7734d 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -95,14 +95,15 @@ impl Stream for SimpleQueryStream { return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0)))); } Message::RowDescription(body) => { - let columns = body + let columns: Arc<[SimpleColumn]> = body .fields() .map(|f| Ok(SimpleColumn::new(f.name().to_string()))) .collect::>() .map_err(Error::parse)? .into(); - *this.columns = Some(columns); + *this.columns = Some(columns.clone()); + return Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription(columns.clone())))); } Message::DataRow(body) => { let row = match &this.columns { diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 737f46631..4fa72aec9 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -328,6 +328,13 @@ async fn simple_query() { _ => panic!("unexpected message"), } match &messages[2] { + SimpleQueryMessage::RowDescription(columns) => { + assert_eq!(columns.get(0).map(|c| c.name()), Some("id")); + assert_eq!(columns.get(1).map(|c| c.name()), Some("name")); + } + _ => panic!("unexpected message") + } + match &messages[3] { SimpleQueryMessage::Row(row) => { assert_eq!(row.columns().get(0).map(|c| c.name()), Some("id")); assert_eq!(row.columns().get(1).map(|c| c.name()), Some("name")); @@ -336,7 +343,7 @@ async fn simple_query() { } _ => panic!("unexpected message"), } - match &messages[3] { + match &messages[4] { SimpleQueryMessage::Row(row) => { assert_eq!(row.columns().get(0).map(|c| c.name()), Some("id")); assert_eq!(row.columns().get(1).map(|c| c.name()), Some("name")); @@ -345,11 +352,11 @@ async fn simple_query() { } _ => panic!("unexpected message"), } - match messages[4] { + match messages[5] { SimpleQueryMessage::CommandComplete(2) => {} _ => panic!("unexpected message"), } - assert_eq!(messages.len(), 5); + assert_eq!(messages.len(), 6); } #[tokio::test] From 7afead9a13d54f1c5ce9bef5eda1fb7ced26db61 Mon Sep 17 00:00:00 2001 From: Dane Rigby Date: Tue, 28 May 2024 22:08:41 -0500 Subject: [PATCH 738/819] Formatting updates --- tokio-postgres/src/lib.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index d650f4db9..6c6266736 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -118,10 +118,6 @@ //! | `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. 
| [time](https://crates.io/crates/time/0.3.0) 0.3 | no | #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use std::sync::Arc; - -use simple_query::SimpleColumn; - pub use crate::cancel_token::CancelToken; pub use crate::client::Client; pub use crate::config::Config; @@ -134,7 +130,7 @@ pub use crate::generic_client::GenericClient; pub use crate::portal::Portal; pub use crate::query::RowStream; pub use crate::row::{Row, SimpleQueryRow}; -pub use crate::simple_query::SimpleQueryStream; +pub use crate::simple_query::{SimpleQueryStream, SimpleColumn}; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::statement::{Column, Statement}; @@ -145,6 +141,7 @@ pub use crate::to_statement::ToStatement; pub use crate::transaction::Transaction; pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder}; use crate::types::ToSql; +use std::sync::Arc; pub mod binary_copy; mod bind; From eec06021d9ebe1c1c2fcc47666a76ce257ae2891 Mon Sep 17 00:00:00 2001 From: Dane Rigby Date: Tue, 28 May 2024 23:50:50 -0500 Subject: [PATCH 739/819] Clippy compliance --- tokio-postgres/src/simple_query.rs | 54 ++++++++++++++---------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 4e0b7734d..e84806d36 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -85,36 +85,34 @@ impl Stream for SimpleQueryStream { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); - loop { - match ready!(this.responses.poll_next(cx)?) { - Message::CommandComplete(body) => { - let rows = extract_row_affected(&body)?; - return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows)))); - } - Message::EmptyQueryResponse => { - return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0)))); - } - Message::RowDescription(body) => { - let columns: Arc<[SimpleColumn]> = body - .fields() - .map(|f| Ok(SimpleColumn::new(f.name().to_string()))) - .collect::>() - .map_err(Error::parse)? - .into(); + match ready!(this.responses.poll_next(cx)?) { + Message::CommandComplete(body) => { + let rows = extract_row_affected(&body)?; + Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows)))) + } + Message::EmptyQueryResponse => { + Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0)))) + } + Message::RowDescription(body) => { + let columns: Arc<[SimpleColumn]> = body + .fields() + .map(|f| Ok(SimpleColumn::new(f.name().to_string()))) + .collect::>() + .map_err(Error::parse)? 
+ .into(); - *this.columns = Some(columns.clone()); - return Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription(columns.clone())))); - } - Message::DataRow(body) => { - let row = match &this.columns { - Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, - None => return Poll::Ready(Some(Err(Error::unexpected_message()))), - }; - return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row)))); - } - Message::ReadyForQuery(_) => return Poll::Ready(None), - _ => return Poll::Ready(Some(Err(Error::unexpected_message()))), + *this.columns = Some(columns.clone()); + Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription(columns.clone())))) + } + Message::DataRow(body) => { + let row = match &this.columns { + Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, + None => return Poll::Ready(Some(Err(Error::unexpected_message()))), + }; + Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row)))) } + Message::ReadyForQuery(_) => Poll::Ready(None), + _ => Poll::Ready(Some(Err(Error::unexpected_message()))), } } } From bd6350c2fff2201d680a1814acf7a9208f4b7ad4 Mon Sep 17 00:00:00 2001 From: Dane Rigby Date: Wed, 29 May 2024 23:32:18 -0500 Subject: [PATCH 740/819] Formatting --- tokio-postgres/src/lib.rs | 4 ++-- tokio-postgres/src/simple_query.rs | 4 +++- tokio-postgres/tests/test/main.rs | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 6c6266736..a603158fb 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -130,7 +130,7 @@ pub use crate::generic_client::GenericClient; pub use crate::portal::Portal; pub use crate::query::RowStream; pub use crate::row::{Row, SimpleQueryRow}; -pub use crate::simple_query::{SimpleQueryStream, SimpleColumn}; +pub use crate::simple_query::{SimpleColumn, SimpleQueryStream}; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::statement::{Column, Statement}; @@ -250,7 +250,7 @@ pub enum SimpleQueryMessage { /// The number of rows modified or selected is returned. 
CommandComplete(u64), /// Column values of the proceeding row values - RowDescription(Arc<[SimpleColumn]>) + RowDescription(Arc<[SimpleColumn]>), } fn slice_iter<'a>( diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index e84806d36..86af8e739 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -102,7 +102,9 @@ impl Stream for SimpleQueryStream { .into(); *this.columns = Some(columns.clone()); - Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription(columns.clone())))) + Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription( + columns.clone(), + )))) } Message::DataRow(body) => { let row = match &this.columns { diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 4fa72aec9..e85960ab6 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -330,9 +330,9 @@ async fn simple_query() { match &messages[2] { SimpleQueryMessage::RowDescription(columns) => { assert_eq!(columns.get(0).map(|c| c.name()), Some("id")); - assert_eq!(columns.get(1).map(|c| c.name()), Some("name")); + assert_eq!(columns.get(1).map(|c| c.name()), Some("name")); } - _ => panic!("unexpected message") + _ => panic!("unexpected message"), } match &messages[3] { SimpleQueryMessage::Row(row) => { From f3976680c6d7004b04b3ba39f90f2956ce6d7010 Mon Sep 17 00:00:00 2001 From: Ramnivas Laddad Date: Sun, 26 May 2024 11:05:00 -0700 Subject: [PATCH 741/819] Work with pools that don't support prepared statements Introduce a new `query_with_param_types` method that allows specifying Postgres type parameters. This obviates the need to use prepared statements just to obtain parameter types for a query. It then combines parse, bind, and execute in a single packet. Related: #1017, #1067 --- tokio-postgres/src/client.rs | 82 +++++++++++++++ tokio-postgres/src/generic_client.rs | 46 +++++++++ tokio-postgres/src/prepare.rs | 2 +- tokio-postgres/src/query.rs | 146 ++++++++++++++++++++++++++- tokio-postgres/src/statement.rs | 13 +++ tokio-postgres/src/transaction.rs | 27 +++++ tokio-postgres/tests/test/main.rs | 106 +++++++++++++++++++ 7 files changed, 416 insertions(+), 6 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index d48a23a60..431bfa792 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -364,6 +364,88 @@ impl Client { query::query(&self.inner, statement, params).await } + /// Like `query`, but requires the types of query parameters to be explicitly specified. + /// + /// Compared to `query`, this method allows performing queries without three round trips (for prepare, execute, and close). Thus, + /// this is suitable in environments where prepared statements aren't supported (such as Cloudflare Workers with Hyperdrive).
+ /// + /// # Examples + /// + /// ```no_run + /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { + /// use tokio_postgres::types::ToSql; + /// use tokio_postgres::types::Type; + /// use futures_util::{pin_mut, TryStreamExt}; + /// + /// let rows = client.query_with_param_types( + /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", + /// &[(&"first param", Type::TEXT), (&2i32, Type::INT4)], + /// ).await?; + /// + /// for row in rows { + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn query_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_raw_with_param_types(statement, params) + .await? + .try_collect() + .await + } + + /// The maximally flexible version of [`query_with_param_types`]. + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list + /// provided, 1-indexed. + /// + /// The parameters must specify value along with their Postgres type. This allows performing + /// queries without three round trips (for prepare, execute, and close). + /// + /// [`query_with_param_types`]: #method.query_with_param_types + /// + /// # Examples + /// + /// ```no_run + /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { + /// use tokio_postgres::types::ToSql; + /// use tokio_postgres::types::Type; + /// use futures_util::{pin_mut, TryStreamExt}; + /// + /// let mut it = client.query_raw_with_param_types( + /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", + /// &[(&"first param", Type::TEXT), (&2i32, Type::INT4)], + /// ).await?; + /// + /// pin_mut!(it); + /// while let Some(row) = it.try_next().await? { + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn query_raw_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result { + fn slice_iter<'a>( + s: &'a [(&'a (dyn ToSql + Sync), Type)], + ) -> impl ExactSizeIterator + 'a { + s.iter() + .map(|(param, param_type)| (*param as _, param_type.clone())) + } + + query::query_with_param_types(&self.inner, statement, slice_iter(params)).await + } + /// Executes a statement, returning the number of rows modified. /// /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 50cff9712..3a0b09233 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -56,6 +56,20 @@ pub trait GenericClient: private::Sealed { I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator; + /// Like `Client::query_with_param_types` + async fn query_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error>; + + /// Like `Client::query_raw_with_param_types`. + async fn query_raw_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result; + /// Like `Client::prepare`. 
async fn prepare(&self, query: &str) -> Result; @@ -136,6 +150,22 @@ impl GenericClient for Client { self.query_raw(statement, params).await } + async fn query_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_with_param_types(statement, params).await + } + + async fn query_raw_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result { + self.query_raw_with_param_types(statement, params).await + } + async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } @@ -222,6 +252,22 @@ impl GenericClient for Transaction<'_> { self.query_raw(statement, params).await } + async fn query_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_with_param_types(statement, params).await + } + + async fn query_raw_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result { + self.query_raw_with_param_types(statement, params).await + } + async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } diff --git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 07fb45694..1d9bacb16 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -131,7 +131,7 @@ fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Resu }) } -async fn get_type(client: &Arc, oid: Oid) -> Result { +pub(crate) async fn get_type(client: &Arc, oid: Oid) -> Result { if let Some(type_) = Type::from_oid(oid) { return Ok(type_); } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index e6e1d00a8..b9cc66405 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -1,17 +1,21 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; +use crate::prepare::get_type; use crate::types::{BorrowToSql, IsNull}; -use crate::{Error, Portal, Row, Statement}; +use crate::{Column, Error, Portal, Row, Statement}; use bytes::{Bytes, BytesMut}; +use fallible_iterator::FallibleIterator; use futures_util::{ready, Stream}; use log::{debug, log_enabled, Level}; use pin_project_lite::pin_project; -use postgres_protocol::message::backend::{CommandCompleteBody, Message}; +use postgres_protocol::message::backend::{CommandCompleteBody, Message, RowDescriptionBody}; use postgres_protocol::message::frontend; +use postgres_types::Type; use std::fmt; use std::marker::PhantomPinned; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; struct BorrowToSqlParamsDebug<'a, T>(&'a [T]); @@ -50,13 +54,125 @@ where }; let responses = start(client, buf).await?; Ok(RowStream { - statement, + statement: statement, responses, rows_affected: None, _p: PhantomPinned, }) } +enum QueryProcessingState { + Empty, + ParseCompleted, + BindCompleted, + ParameterDescribed, + Final(Vec), +} + +/// State machine for processing messages for `query_with_param_types`. 
+impl QueryProcessingState { + pub async fn process_message( + self, + client: &Arc, + message: Message, + ) -> Result { + match (self, message) { + (QueryProcessingState::Empty, Message::ParseComplete) => { + Ok(QueryProcessingState::ParseCompleted) + } + (QueryProcessingState::ParseCompleted, Message::BindComplete) => { + Ok(QueryProcessingState::BindCompleted) + } + (QueryProcessingState::BindCompleted, Message::ParameterDescription(_)) => { + Ok(QueryProcessingState::ParameterDescribed) + } + ( + QueryProcessingState::ParameterDescribed, + Message::RowDescription(row_description), + ) => Self::form_final(client, Some(row_description)).await, + (QueryProcessingState::ParameterDescribed, Message::NoData) => { + Self::form_final(client, None).await + } + (_, Message::ErrorResponse(body)) => Err(Error::db(body)), + _ => Err(Error::unexpected_message()), + } + } + + async fn form_final( + client: &Arc, + row_description: Option, + ) -> Result { + let mut columns = vec![]; + if let Some(row_description) = row_description { + let mut it = row_description.fields(); + while let Some(field) = it.next().map_err(Error::parse)? { + let type_ = get_type(client, field.type_oid()).await?; + let column = Column { + name: field.name().to_string(), + table_oid: Some(field.table_oid()).filter(|n| *n != 0), + column_id: Some(field.column_id()).filter(|n| *n != 0), + r#type: type_, + }; + columns.push(column); + } + } + + Ok(Self::Final(columns)) + } +} + +pub async fn query_with_param_types<'a, P, I>( + client: &Arc, + query: &str, + params: I, +) -> Result +where + P: BorrowToSql, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + let (params, param_types): (Vec<_>, Vec<_>) = params.into_iter().unzip(); + + let params = params.into_iter(); + + let param_oids = param_types.iter().map(|t| t.oid()).collect::>(); + + let params = params.into_iter(); + + let buf = client.with_buf(|buf| { + frontend::parse("", query, param_oids.into_iter(), buf).map_err(Error::parse)?; + + encode_bind_with_statement_name_and_param_types("", ¶m_types, params, "", buf)?; + + frontend::describe(b'S', "", buf).map_err(Error::encode)?; + + frontend::execute("", 0, buf).map_err(Error::encode)?; + + frontend::sync(buf); + + Ok(buf.split().freeze()) + })?; + + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + let mut state = QueryProcessingState::Empty; + + loop { + let message = responses.next().await?; + + state = state.process_message(client, message).await?; + + if let QueryProcessingState::Final(columns) = state { + return Ok(RowStream { + statement: Statement::unnamed(vec![], columns), + responses, + rows_affected: None, + _p: PhantomPinned, + }); + } + } +} + pub async fn query_portal( client: &InnerClient, portal: &Portal, @@ -164,7 +280,27 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let param_types = statement.params(); + encode_bind_with_statement_name_and_param_types( + statement.name(), + statement.params(), + params, + portal, + buf, + ) +} + +fn encode_bind_with_statement_name_and_param_types( + statement_name: &str, + param_types: &[Type], + params: I, + portal: &str, + buf: &mut BytesMut, +) -> Result<(), Error> +where + P: BorrowToSql, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ let params = params.into_iter(); if param_types.len() != params.len() { @@ -181,7 +317,7 @@ where let mut error_idx = 0; let r = frontend::bind( portal, - statement.name(), + statement_name, param_formats, params.zip(param_types).enumerate(), |(idx, (param, ty)), buf| 
match param.borrow_to_sql().to_sql_checked(ty, buf) { diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index c5d657738..2b88ecd3b 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -14,6 +14,10 @@ struct StatementInner { impl Drop for StatementInner { fn drop(&mut self) { + if self.name.is_empty() { + // Unnamed statements don't need to be closed + return; + } if let Some(client) = self.client.upgrade() { let buf = client.with_buf(|buf| { frontend::close(b'S', &self.name, buf).unwrap(); @@ -46,6 +50,15 @@ impl Statement { })) } + pub(crate) fn unnamed(params: Vec, columns: Vec) -> Statement { + Statement(Arc::new(StatementInner { + client: Weak::new(), + name: String::new(), + params, + columns, + })) + } + pub(crate) fn name(&self) -> &str { &self.0.name } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 96a324652..5a6094b56 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -227,6 +227,33 @@ impl<'a> Transaction<'a> { query::query_portal(self.client.inner(), portal, max_rows).await } + /// Like `Client::query_with_param_types`. + pub async fn query_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_raw_with_param_types(statement, params) + .await? + .try_collect() + .await + } + + /// Like `Client::query_raw_with_param_types`. + pub async fn query_raw_with_param_types( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result { + fn slice_iter<'a>( + s: &'a [(&'a (dyn ToSql + Sync), Type)], + ) -> impl ExactSizeIterator + 'a { + s.iter() + .map(|(param, param_type)| (*param as _, param_type.clone())) + } + query::query_with_param_types(self.client.inner(), statement, slice_iter(params)).await + } + /// Like `Client::copy_in`. 
pub async fn copy_in(&self, statement: &T) -> Result, Error> where diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 737f46631..925c99206 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -952,3 +952,109 @@ async fn deferred_constraint() { .await .unwrap_err(); } + +#[tokio::test] +async fn query_with_param_types_no_transaction() { + let client = connect("user=postgres").await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo ( + name TEXT, + age INT + ); + INSERT INTO foo (name, age) VALUES ('alice', 20), ('bob', 30), ('carol', 40); + ", + ) + .await + .unwrap(); + + let rows: Vec = client + .query_with_param_types( + "SELECT name, age, 'literal', 5 FROM foo WHERE name <> $1 AND age < $2 ORDER BY age", + &[(&"alice", Type::TEXT), (&50i32, Type::INT4)], + ) + .await + .unwrap(); + + assert_eq!(rows.len(), 2); + let first_row = &rows[0]; + assert_eq!(first_row.get::<_, &str>(0), "bob"); + assert_eq!(first_row.get::<_, i32>(1), 30); + assert_eq!(first_row.get::<_, &str>(2), "literal"); + assert_eq!(first_row.get::<_, i32>(3), 5); + + let second_row = &rows[1]; + assert_eq!(second_row.get::<_, &str>(0), "carol"); + assert_eq!(second_row.get::<_, i32>(1), 40); + assert_eq!(second_row.get::<_, &str>(2), "literal"); + assert_eq!(second_row.get::<_, i32>(3), 5); +} + +#[tokio::test] +async fn query_with_param_types_with_transaction() { + let mut client = connect("user=postgres").await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo ( + name TEXT, + age INT + ); + ", + ) + .await + .unwrap(); + + let transaction = client.transaction().await.unwrap(); + + let rows: Vec = transaction + .query_with_param_types( + "INSERT INTO foo (name, age) VALUES ($1, $2), ($3, $4), ($5, $6) returning name, age", + &[ + (&"alice", Type::TEXT), + (&20i32, Type::INT4), + (&"bob", Type::TEXT), + (&30i32, Type::INT4), + (&"carol", Type::TEXT), + (&40i32, Type::INT4), + ], + ) + .await + .unwrap(); + let inserted_values: Vec<(String, i32)> = rows + .iter() + .map(|row| (row.get::<_, String>(0), row.get::<_, i32>(1))) + .collect(); + assert_eq!( + inserted_values, + [ + ("alice".to_string(), 20), + ("bob".to_string(), 30), + ("carol".to_string(), 40) + ] + ); + + let rows: Vec = transaction + .query_with_param_types( + "SELECT name, age, 'literal', 5 FROM foo WHERE name <> $1 AND age < $2 ORDER BY age", + &[(&"alice", Type::TEXT), (&50i32, Type::INT4)], + ) + .await + .unwrap(); + + assert_eq!(rows.len(), 2); + let first_row = &rows[0]; + assert_eq!(first_row.get::<_, &str>(0), "bob"); + assert_eq!(first_row.get::<_, i32>(1), 30); + assert_eq!(first_row.get::<_, &str>(2), "literal"); + assert_eq!(first_row.get::<_, i32>(3), 5); + + let second_row = &rows[1]; + assert_eq!(second_row.get::<_, &str>(0), "carol"); + assert_eq!(second_row.get::<_, i32>(1), 40); + assert_eq!(second_row.get::<_, &str>(2), "literal"); + assert_eq!(second_row.get::<_, i32>(3), 5); +} From 84994dad1aa9c3ef5c813b95c86c80dbfa4b7f0d Mon Sep 17 00:00:00 2001 From: Lev Kokotov Date: Sat, 6 Jul 2024 11:23:26 -0400 Subject: [PATCH 742/819] Derive Clone for Row --- postgres-protocol/src/message/backend.rs | 2 +- tokio-postgres/src/row.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 1b5be1098..c4439b26a 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -524,7 +524,7 @@ impl 
CopyOutResponseBody { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct DataRowBody { storage: Bytes, len: u16, diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 3c79de603..767c26921 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -95,6 +95,7 @@ where } /// A row of data returned from the database by a query. +#[derive(Clone)] pub struct Row { statement: Statement, body: DataRowBody, From 2b1949dd2f8745fcfaefe4b5e228684c25997265 Mon Sep 17 00:00:00 2001 From: Sidney Cammeresi Date: Sat, 6 Jul 2024 11:00:41 -0700 Subject: [PATCH 743/819] impl Debug for Statement The lack of this common trait bound caused some unpleasantness. For example, the following didn't compile: let x = OnceLock::new(); let stmt = db.prepare(...)?; x.set(stmt).expect(...); // returns Result<(), T=Statement> where T: Debug --- tokio-postgres/src/statement.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index c5d657738..4955d3b41 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -61,6 +61,16 @@ impl Statement { } } +impl std::fmt::Debug for Statement { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + f.debug_struct("Statement") + .field("name", &self.0.name) + .field("params", &self.0.params) + .field("columns", &self.0.columns) + .finish_non_exhaustive() + } +} + /// Information about a column of a query. #[derive(Debug)] pub struct Column { From 1f312194928c5a385d51d52e5d13ca59d3dc1b43 Mon Sep 17 00:00:00 2001 From: Sidney Cammeresi Date: Sat, 6 Jul 2024 12:29:09 -0700 Subject: [PATCH 744/819] Fix a few nits pointed out by clippy - ...::max_value() -> ..::MAX - delete explicit import of signed integer types --- postgres-protocol/src/lib.rs | 2 +- postgres-types/src/lib.rs | 2 +- postgres-types/src/special.rs | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index 83d9bf55c..e0de3b6c6 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -60,7 +60,7 @@ macro_rules! 
from_usize { impl FromUsize for $t { #[inline] fn from_usize(x: usize) -> io::Result<$t> { - if x > <$t>::max_value() as usize { + if x > <$t>::MAX as usize { Err(io::Error::new( io::ErrorKind::InvalidInput, "value too large to transmit", diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 2f02f6e5f..492039766 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -1222,7 +1222,7 @@ impl ToSql for IpAddr { } fn downcast(len: usize) -> Result> { - if len > i32::max_value() as usize { + if len > i32::MAX as usize { Err("value too large to transmit".into()) } else { Ok(len as i32) diff --git a/postgres-types/src/special.rs b/postgres-types/src/special.rs index 1a865287e..d8541bf0e 100644 --- a/postgres-types/src/special.rs +++ b/postgres-types/src/special.rs @@ -1,7 +1,6 @@ use bytes::BytesMut; use postgres_protocol::types; use std::error::Error; -use std::{i32, i64}; use crate::{FromSql, IsNull, ToSql, Type}; From 263b0068af39072bc7be05b6500e47f263cbd43e Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 6 Jul 2024 19:21:37 -0400 Subject: [PATCH 745/819] Handle non-UTF8 error fields --- postgres-protocol/src/message/backend.rs | 10 +++++-- tokio-postgres/src/error/mod.rs | 37 ++++++++++++------------ 2 files changed, 27 insertions(+), 20 deletions(-) diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index c4439b26a..73b169288 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -633,7 +633,7 @@ impl<'a> FallibleIterator for ErrorFields<'a> { } let value_end = find_null(self.buf, 0)?; - let value = get_str(&self.buf[..value_end])?; + let value = &self.buf[..value_end]; self.buf = &self.buf[value_end + 1..]; Ok(Some(ErrorField { type_, value })) @@ -642,7 +642,7 @@ impl<'a> FallibleIterator for ErrorFields<'a> { pub struct ErrorField<'a> { type_: u8, - value: &'a str, + value: &'a [u8], } impl<'a> ErrorField<'a> { @@ -652,7 +652,13 @@ impl<'a> ErrorField<'a> { } #[inline] + #[deprecated(note = "use value_bytes instead", since = "0.6.7")] pub fn value(&self) -> &str { + str::from_utf8(self.value).expect("error field value contained non-UTF8 bytes") + } + + #[inline] + pub fn value_bytes(&self) -> &[u8] { self.value } } diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index f1e2644c6..75664d258 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -107,14 +107,15 @@ impl DbError { let mut routine = None; while let Some(field) = fields.next()? 
{ + let value = String::from_utf8_lossy(field.value_bytes()); match field.type_() { - b'S' => severity = Some(field.value().to_owned()), - b'C' => code = Some(SqlState::from_code(field.value())), - b'M' => message = Some(field.value().to_owned()), - b'D' => detail = Some(field.value().to_owned()), - b'H' => hint = Some(field.value().to_owned()), + b'S' => severity = Some(value.into_owned()), + b'C' => code = Some(SqlState::from_code(&value)), + b'M' => message = Some(value.into_owned()), + b'D' => detail = Some(value.into_owned()), + b'H' => hint = Some(value.into_owned()), b'P' => { - normal_position = Some(field.value().parse::().map_err(|_| { + normal_position = Some(value.parse::().map_err(|_| { io::Error::new( io::ErrorKind::InvalidInput, "`P` field did not contain an integer", @@ -122,32 +123,32 @@ impl DbError { })?); } b'p' => { - internal_position = Some(field.value().parse::().map_err(|_| { + internal_position = Some(value.parse::().map_err(|_| { io::Error::new( io::ErrorKind::InvalidInput, "`p` field did not contain an integer", ) })?); } - b'q' => internal_query = Some(field.value().to_owned()), - b'W' => where_ = Some(field.value().to_owned()), - b's' => schema = Some(field.value().to_owned()), - b't' => table = Some(field.value().to_owned()), - b'c' => column = Some(field.value().to_owned()), - b'd' => datatype = Some(field.value().to_owned()), - b'n' => constraint = Some(field.value().to_owned()), - b'F' => file = Some(field.value().to_owned()), + b'q' => internal_query = Some(value.into_owned()), + b'W' => where_ = Some(value.into_owned()), + b's' => schema = Some(value.into_owned()), + b't' => table = Some(value.into_owned()), + b'c' => column = Some(value.into_owned()), + b'd' => datatype = Some(value.into_owned()), + b'n' => constraint = Some(value.into_owned()), + b'F' => file = Some(value.into_owned()), b'L' => { - line = Some(field.value().parse::().map_err(|_| { + line = Some(value.parse::().map_err(|_| { io::Error::new( io::ErrorKind::InvalidInput, "`L` field did not contain an integer", ) })?); } - b'R' => routine = Some(field.value().to_owned()), + b'R' => routine = Some(value.into_owned()), b'V' => { - parsed_severity = Some(Severity::from_str(field.value()).ok_or_else(|| { + parsed_severity = Some(Severity::from_str(&value).ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, "`V` field contained an invalid value", From cfd91632be877543a7e19e7a05816ed5d241b559 Mon Sep 17 00:00:00 2001 From: Dane Rigby Date: Sun, 7 Jul 2024 13:56:35 -0500 Subject: [PATCH 746/819] PR Fix: Only use single clone for RowDescription --- tokio-postgres/src/simple_query.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 86af8e739..b6500260e 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -101,9 +101,9 @@ impl Stream for SimpleQueryStream { .map_err(Error::parse)? 
.into(); - *this.columns = Some(columns.clone()); + *this.columns = Some(columns); Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription( - columns.clone(), + this.columns.as_ref().unwrap().clone(), )))) } Message::DataRow(body) => { From 3f8f5ded337a0122959f6e4a3dc9343bf6c6ee70 Mon Sep 17 00:00:00 2001 From: Ramnivas Laddad Date: Sun, 7 Jul 2024 16:21:40 -0700 Subject: [PATCH 747/819] Replace the state machine to process messages with a direct match statements --- tokio-postgres/src/query.rs | 101 ++++++++++-------------------------- 1 file changed, 27 insertions(+), 74 deletions(-) diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index b9cc66405..2bdfa14cc 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -9,7 +9,7 @@ use fallible_iterator::FallibleIterator; use futures_util::{ready, Stream}; use log::{debug, log_enabled, Level}; use pin_project_lite::pin_project; -use postgres_protocol::message::backend::{CommandCompleteBody, Message, RowDescriptionBody}; +use postgres_protocol::message::backend::{CommandCompleteBody, Message}; use postgres_protocol::message::frontend; use postgres_types::Type; use std::fmt; @@ -61,66 +61,6 @@ where }) } -enum QueryProcessingState { - Empty, - ParseCompleted, - BindCompleted, - ParameterDescribed, - Final(Vec), -} - -/// State machine for processing messages for `query_with_param_types`. -impl QueryProcessingState { - pub async fn process_message( - self, - client: &Arc, - message: Message, - ) -> Result { - match (self, message) { - (QueryProcessingState::Empty, Message::ParseComplete) => { - Ok(QueryProcessingState::ParseCompleted) - } - (QueryProcessingState::ParseCompleted, Message::BindComplete) => { - Ok(QueryProcessingState::BindCompleted) - } - (QueryProcessingState::BindCompleted, Message::ParameterDescription(_)) => { - Ok(QueryProcessingState::ParameterDescribed) - } - ( - QueryProcessingState::ParameterDescribed, - Message::RowDescription(row_description), - ) => Self::form_final(client, Some(row_description)).await, - (QueryProcessingState::ParameterDescribed, Message::NoData) => { - Self::form_final(client, None).await - } - (_, Message::ErrorResponse(body)) => Err(Error::db(body)), - _ => Err(Error::unexpected_message()), - } - } - - async fn form_final( - client: &Arc, - row_description: Option, - ) -> Result { - let mut columns = vec![]; - if let Some(row_description) = row_description { - let mut it = row_description.fields(); - while let Some(field) = it.next().map_err(Error::parse)? { - let type_ = get_type(client, field.type_oid()).await?; - let column = Column { - name: field.name().to_string(), - table_oid: Some(field.table_oid()).filter(|n| *n != 0), - column_id: Some(field.column_id()).filter(|n| *n != 0), - r#type: type_, - }; - columns.push(column); - } - } - - Ok(Self::Final(columns)) - } -} - pub async fn query_with_param_types<'a, P, I>( client: &Arc, query: &str, @@ -155,20 +95,33 @@ where let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; - let mut state = QueryProcessingState::Empty; - loop { - let message = responses.next().await?; - - state = state.process_message(client, message).await?; - - if let QueryProcessingState::Final(columns) = state { - return Ok(RowStream { - statement: Statement::unnamed(vec![], columns), - responses, - rows_affected: None, - _p: PhantomPinned, - }); + match responses.next().await? 
{ + Message::ParseComplete + | Message::BindComplete + | Message::ParameterDescription(_) + | Message::NoData => {} + Message::RowDescription(row_description) => { + let mut columns: Vec = vec![]; + let mut it = row_description.fields(); + while let Some(field) = it.next().map_err(Error::parse)? { + let type_ = get_type(client, field.type_oid()).await?; + let column = Column { + name: field.name().to_string(), + table_oid: Some(field.table_oid()).filter(|n| *n != 0), + column_id: Some(field.column_id()).filter(|n| *n != 0), + r#type: type_, + }; + columns.push(column); + } + return Ok(RowStream { + statement: Statement::unnamed(vec![], columns), + responses, + rows_affected: None, + _p: PhantomPinned, + }); + } + _ => return Err(Error::unexpected_message()), } } } From 74eb4dbf7399cb96500f2b60a2b838805471a26a Mon Sep 17 00:00:00 2001 From: Ramnivas Laddad Date: Sun, 7 Jul 2024 16:43:41 -0700 Subject: [PATCH 748/819] Remove query_raw_with_param_types as per PR feedback --- tokio-postgres/src/client.rs | 56 ++++++---------------------- tokio-postgres/src/generic_client.rs | 23 ------------ tokio-postgres/src/transaction.rs | 20 +--------- 3 files changed, 12 insertions(+), 87 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 431bfa792..e420bcf2f 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -366,8 +366,13 @@ impl Client { /// Like `query`, but requires the types of query parameters to be explicitly specified. /// - /// Compared to `query`, this method allows performing queries without three round trips (for prepare, execute, and close). Thus, - /// this is suitable in environments where prepared statements aren't supported (such as Cloudflare Workers with Hyperdrive). + /// Compared to `query`, this method allows performing queries without three round trips (for + /// prepare, execute, and close) by requiring the caller to specify parameter values along with + /// their Postgres type. Thus, this is suitable in environments where prepared statements aren't + /// supported (such as Cloudflare Workers with Hyperdrive). + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the + /// parameter of the list provided, 1-indexed. /// /// # Examples /// @@ -394,48 +399,6 @@ impl Client { statement: &str, params: &[(&(dyn ToSql + Sync), Type)], ) -> Result, Error> { - self.query_raw_with_param_types(statement, params) - .await? - .try_collect() - .await - } - - /// The maximally flexible version of [`query_with_param_types`]. - /// - /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list - /// provided, 1-indexed. - /// - /// The parameters must specify value along with their Postgres type. This allows performing - /// queries without three round trips (for prepare, execute, and close). - /// - /// [`query_with_param_types`]: #method.query_with_param_types - /// - /// # Examples - /// - /// ```no_run - /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { - /// use tokio_postgres::types::ToSql; - /// use tokio_postgres::types::Type; - /// use futures_util::{pin_mut, TryStreamExt}; - /// - /// let mut it = client.query_raw_with_param_types( - /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", - /// &[(&"first param", Type::TEXT), (&2i32, Type::INT4)], - /// ).await?; - /// - /// pin_mut!(it); - /// while let Some(row) = it.try_next().await? 
{ - /// let foo: i32 = row.get("foo"); - /// println!("foo: {}", foo); - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn query_raw_with_param_types( - &self, - statement: &str, - params: &[(&(dyn ToSql + Sync), Type)], - ) -> Result { fn slice_iter<'a>( s: &'a [(&'a (dyn ToSql + Sync), Type)], ) -> impl ExactSizeIterator + 'a { @@ -443,7 +406,10 @@ impl Client { .map(|(param, param_type)| (*param as _, param_type.clone())) } - query::query_with_param_types(&self.inner, statement, slice_iter(params)).await + query::query_with_param_types(&self.inner, statement, slice_iter(params)) + .await? + .try_collect() + .await } /// Executes a statement, returning the number of rows modified. diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 3a0b09233..b892015dc 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -63,13 +63,6 @@ pub trait GenericClient: private::Sealed { params: &[(&(dyn ToSql + Sync), Type)], ) -> Result, Error>; - /// Like `Client::query_raw_with_param_types`. - async fn query_raw_with_param_types( - &self, - statement: &str, - params: &[(&(dyn ToSql + Sync), Type)], - ) -> Result; - /// Like `Client::prepare`. async fn prepare(&self, query: &str) -> Result; @@ -158,14 +151,6 @@ impl GenericClient for Client { self.query_with_param_types(statement, params).await } - async fn query_raw_with_param_types( - &self, - statement: &str, - params: &[(&(dyn ToSql + Sync), Type)], - ) -> Result { - self.query_raw_with_param_types(statement, params).await - } - async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } @@ -260,14 +245,6 @@ impl GenericClient for Transaction<'_> { self.query_with_param_types(statement, params).await } - async fn query_raw_with_param_types( - &self, - statement: &str, - params: &[(&(dyn ToSql + Sync), Type)], - ) -> Result { - self.query_raw_with_param_types(statement, params).await - } - async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 5a6094b56..8a0ad2224 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -233,25 +233,7 @@ impl<'a> Transaction<'a> { statement: &str, params: &[(&(dyn ToSql + Sync), Type)], ) -> Result, Error> { - self.query_raw_with_param_types(statement, params) - .await? - .try_collect() - .await - } - - /// Like `Client::query_raw_with_param_types`. - pub async fn query_raw_with_param_types( - &self, - statement: &str, - params: &[(&(dyn ToSql + Sync), Type)], - ) -> Result { - fn slice_iter<'a>( - s: &'a [(&'a (dyn ToSql + Sync), Type)], - ) -> impl ExactSizeIterator + 'a { - s.iter() - .map(|(param, param_type)| (*param as _, param_type.clone())) - } - query::query_with_param_types(self.client.inner(), statement, slice_iter(params)).await + self.client.query_with_param_types(statement, params).await } /// Like `Client::copy_in`. 
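At this point in the series the only public entry point left is `Client::query_with_param_types`, which batches parse, bind, describe, execute and sync into one request and collects the resulting rows, so no separate prepare/close round trips are made. Below is a minimal caller-side sketch of that API, assuming an already-connected `tokio_postgres::Client`; the table and column names (`users`, `name`, `id`) are illustrative assumptions and are not taken from the patches.

```rust
use tokio_postgres::types::Type;
use tokio_postgres::{Client, Error};

// Sketch only: `users`, `name` and `id` are assumed example names.
async fn lookup_names(client: &Client) -> Result<(), Error> {
    // A single round trip: parse, bind, describe, execute and sync are batched,
    // so the query is never prepared as a named server-side statement.
    let rows = client
        .query_with_param_types(
            "SELECT name FROM users WHERE name <> $1 AND id < $2",
            &[(&"alice", Type::TEXT), (&100i32, Type::INT4)],
        )
        .await?;

    for row in rows {
        let name: &str = row.get(0);
        println!("name: {}", name);
    }
    Ok(())
}
```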
From 2647024c660ca27701898325a8772b83bece4982 Mon Sep 17 00:00:00 2001 From: Dane Rigby Date: Sun, 7 Jul 2024 21:30:23 -0500 Subject: [PATCH 749/819] PR Fix: Clone first then move --- tokio-postgres/src/simple_query.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index b6500260e..24473b896 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -101,10 +101,8 @@ impl Stream for SimpleQueryStream { .map_err(Error::parse)? .into(); - *this.columns = Some(columns); - Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription( - this.columns.as_ref().unwrap().clone(), - )))) + *this.columns = Some(columns.clone()); + Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription(columns)))) } Message::DataRow(body) => { let row = match &this.columns { From dbd4d02e2f3a367b949e356e9dda40c08272d954 Mon Sep 17 00:00:00 2001 From: Ramnivas Laddad Date: Mon, 8 Jul 2024 17:21:32 -0700 Subject: [PATCH 750/819] Address review comment to rename query_with_param_types to query_typed --- tokio-postgres/src/client.rs | 6 +++--- tokio-postgres/src/generic_client.rs | 12 ++++++------ tokio-postgres/src/query.rs | 2 +- tokio-postgres/src/transaction.rs | 6 +++--- tokio-postgres/tests/test/main.rs | 10 +++++----- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index e420bcf2f..2b29351a5 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -382,7 +382,7 @@ impl Client { /// use tokio_postgres::types::Type; /// use futures_util::{pin_mut, TryStreamExt}; /// - /// let rows = client.query_with_param_types( + /// let rows = client.query_typed( /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", /// &[(&"first param", Type::TEXT), (&2i32, Type::INT4)], /// ).await?; @@ -394,7 +394,7 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub async fn query_with_param_types( + pub async fn query_typed( &self, statement: &str, params: &[(&(dyn ToSql + Sync), Type)], @@ -406,7 +406,7 @@ impl Client { .map(|(param, param_type)| (*param as _, param_type.clone())) } - query::query_with_param_types(&self.inner, statement, slice_iter(params)) + query::query_typed(&self.inner, statement, slice_iter(params)) .await? 
.try_collect() .await diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index e43bddfea..b91d78064 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -56,8 +56,8 @@ pub trait GenericClient: private::Sealed { I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator; - /// Like [`Client::query_with_param_types`] - async fn query_with_param_types( + /// Like [`Client::query_typed`] + async fn query_typed( &self, statement: &str, params: &[(&(dyn ToSql + Sync), Type)], @@ -146,12 +146,12 @@ impl GenericClient for Client { self.query_raw(statement, params).await } - async fn query_with_param_types( + async fn query_typed( &self, statement: &str, params: &[(&(dyn ToSql + Sync), Type)], ) -> Result, Error> { - self.query_with_param_types(statement, params).await + self.query_typed(statement, params).await } async fn prepare(&self, query: &str) -> Result { @@ -244,12 +244,12 @@ impl GenericClient for Transaction<'_> { self.query_raw(statement, params).await } - async fn query_with_param_types( + async fn query_typed( &self, statement: &str, params: &[(&(dyn ToSql + Sync), Type)], ) -> Result, Error> { - self.query_with_param_types(statement, params).await + self.query_typed(statement, params).await } async fn prepare(&self, query: &str) -> Result { diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 2bdfa14cc..b54e095df 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -61,7 +61,7 @@ where }) } -pub async fn query_with_param_types<'a, P, I>( +pub async fn query_typed<'a, P, I>( client: &Arc, query: &str, params: I, diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 8a0ad2224..3e62b2ac7 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -227,13 +227,13 @@ impl<'a> Transaction<'a> { query::query_portal(self.client.inner(), portal, max_rows).await } - /// Like `Client::query_with_param_types`. - pub async fn query_with_param_types( + /// Like `Client::query_typed`. + pub async fn query_typed( &self, statement: &str, params: &[(&(dyn ToSql + Sync), Type)], ) -> Result, Error> { - self.client.query_with_param_types(statement, params).await + self.client.query_typed(statement, params).await } /// Like `Client::copy_in`. 
diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 925c99206..7ddb7a36a 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -954,7 +954,7 @@ async fn deferred_constraint() { } #[tokio::test] -async fn query_with_param_types_no_transaction() { +async fn query_typed_no_transaction() { let client = connect("user=postgres").await; client @@ -971,7 +971,7 @@ async fn query_with_param_types_no_transaction() { .unwrap(); let rows: Vec = client - .query_with_param_types( + .query_typed( "SELECT name, age, 'literal', 5 FROM foo WHERE name <> $1 AND age < $2 ORDER BY age", &[(&"alice", Type::TEXT), (&50i32, Type::INT4)], ) @@ -993,7 +993,7 @@ async fn query_with_param_types_no_transaction() { } #[tokio::test] -async fn query_with_param_types_with_transaction() { +async fn query_typed_with_transaction() { let mut client = connect("user=postgres").await; client @@ -1011,7 +1011,7 @@ async fn query_with_param_types_with_transaction() { let transaction = client.transaction().await.unwrap(); let rows: Vec = transaction - .query_with_param_types( + .query_typed( "INSERT INTO foo (name, age) VALUES ($1, $2), ($3, $4), ($5, $6) returning name, age", &[ (&"alice", Type::TEXT), @@ -1038,7 +1038,7 @@ async fn query_with_param_types_with_transaction() { ); let rows: Vec = transaction - .query_with_param_types( + .query_typed( "SELECT name, age, 'literal', 5 FROM foo WHERE name <> $1 AND age < $2 ORDER BY age", &[(&"alice", Type::TEXT), (&50i32, Type::INT4)], ) From 0fa32471ef2e20b7f2e554d6d97cde3a67f1d494 Mon Sep 17 00:00:00 2001 From: Ramnivas Laddad Date: Tue, 9 Jul 2024 17:59:39 -0700 Subject: [PATCH 751/819] Fix a clippy warning --- tokio-postgres/src/query.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index b54e095df..e304bbaea 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -54,7 +54,7 @@ where }; let responses = start(client, buf).await?; Ok(RowStream { - statement: statement, + statement, responses, rows_affected: None, _p: PhantomPinned, From 71c836b980799256a7f266195382fc8449fca5e4 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sat, 13 Jul 2024 20:45:32 -0400 Subject: [PATCH 752/819] query_typed tweaks --- postgres/src/client.rs | 65 ++++++++++++++++++++++++++++ postgres/src/generic_client.rs | 45 +++++++++++++++++++ postgres/src/transaction.rs | 29 +++++++++++++ tokio-postgres/src/client.rs | 63 +++++++++++++++++---------- tokio-postgres/src/generic_client.rs | 22 ++++++++++ tokio-postgres/src/query.rs | 63 +++++++++++---------------- tokio-postgres/src/transaction.rs | 27 ++++++++---- 7 files changed, 243 insertions(+), 71 deletions(-) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index c8e14cf81..42ce6dec9 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -257,6 +257,71 @@ impl Client { Ok(RowIter::new(self.connection.as_ref(), stream)) } + /// Like `query`, but requires the types of query parameters to be explicitly specified. + /// + /// Compared to `query`, this method allows performing queries without three round trips (for + /// prepare, execute, and close) by requiring the caller to specify parameter values along with + /// their Postgres type. Thus, this is suitable in environments where prepared statements aren't + /// supported (such as Cloudflare Workers with Hyperdrive). 
+ /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the + /// parameter of the list provided, 1-indexed. + pub fn query_typed( + &mut self, + query: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.connection + .block_on(self.client.query_typed(query, params)) + } + + /// The maximally flexible version of [`query_typed`]. + /// + /// Compared to `query`, this method allows performing queries without three round trips (for + /// prepare, execute, and close) by requiring the caller to specify parameter values along with + /// their Postgres type. Thus, this is suitable in environments where prepared statements aren't + /// supported (such as Cloudflare Workers with Hyperdrive). + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the + /// parameter of the list provided, 1-indexed. + /// + /// [`query_typed`]: #method.query_typed + /// + /// # Examples + /// ```no_run + /// # use postgres::{Client, NoTls}; + /// use postgres::types::{ToSql, Type}; + /// use fallible_iterator::FallibleIterator; + /// # fn main() -> Result<(), postgres::Error> { + /// # let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let params: Vec<(String, Type)> = vec![ + /// ("first param".into(), Type::TEXT), + /// ("second param".into(), Type::TEXT), + /// ]; + /// let mut it = client.query_typed_raw( + /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", + /// params, + /// )?; + /// + /// while let Some(row) = it.next()? { + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn query_typed_raw(&mut self, query: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator, + { + let stream = self + .connection + .block_on(self.client.query_typed_raw(query, params))?; + Ok(RowIter::new(self.connection.as_ref(), stream)) + } + /// Creates a new prepared statement. /// /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), diff --git a/postgres/src/generic_client.rs b/postgres/src/generic_client.rs index 12f07465d..7b534867c 100644 --- a/postgres/src/generic_client.rs +++ b/postgres/src/generic_client.rs @@ -44,6 +44,19 @@ pub trait GenericClient: private::Sealed { I: IntoIterator, I::IntoIter: ExactSizeIterator; + /// Like [`Client::query_typed`] + fn query_typed( + &mut self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error>; + + /// Like [`Client::query_typed_raw`] + fn query_typed_raw(&mut self, statement: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator + Sync + Send; + /// Like `Client::prepare`. 
fn prepare(&mut self, query: &str) -> Result; @@ -115,6 +128,22 @@ impl GenericClient for Client { self.query_raw(query, params) } + fn query_typed( + &mut self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_typed(statement, params) + } + + fn query_typed_raw(&mut self, statement: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator + Sync + Send, + { + self.query_typed_raw(statement, params) + } + fn prepare(&mut self, query: &str) -> Result { self.prepare(query) } @@ -195,6 +224,22 @@ impl GenericClient for Transaction<'_> { self.query_raw(query, params) } + fn query_typed( + &mut self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_typed(statement, params) + } + + fn query_typed_raw(&mut self, statement: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator + Sync + Send, + { + self.query_typed_raw(statement, params) + } + fn prepare(&mut self, query: &str) -> Result { self.prepare(query) } diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 17c49c406..5c8c15973 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -115,6 +115,35 @@ impl<'a> Transaction<'a> { Ok(RowIter::new(self.connection.as_ref(), stream)) } + /// Like `Client::query_typed`. + pub fn query_typed( + &mut self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .query_typed(statement, params), + ) + } + + /// Like `Client::query_typed_raw`. + pub fn query_typed_raw(&mut self, query: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator, + { + let stream = self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .query_typed_raw(query, params), + )?; + Ok(RowIter::new(self.connection.as_ref(), stream)) + } + /// Binds parameters to a statement, creating a "portal". /// /// Portals can be used with the `query_portal` method to page through the results of a query without being forced diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 2b29351a5..b04f05f88 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -333,7 +333,6 @@ impl Client { /// /// ```no_run /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { - /// use tokio_postgres::types::ToSql; /// use futures_util::{pin_mut, TryStreamExt}; /// /// let params: Vec = vec![ @@ -373,43 +372,59 @@ impl Client { /// /// A statement may contain parameters, specified by `$n`, where `n` is the index of the /// parameter of the list provided, 1-indexed. + pub async fn query_typed( + &self, + query: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_typed_raw(query, params.iter().map(|(v, t)| (*v, t.clone()))) + .await? + .try_collect() + .await + } + + /// The maximally flexible version of [`query_typed`]. + /// + /// Compared to `query`, this method allows performing queries without three round trips (for + /// prepare, execute, and close) by requiring the caller to specify parameter values along with + /// their Postgres type. Thus, this is suitable in environments where prepared statements aren't + /// supported (such as Cloudflare Workers with Hyperdrive). + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the + /// parameter of the list provided, 1-indexed. 
+ /// + /// [`query_typed`]: #method.query_typed /// /// # Examples /// /// ```no_run /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { - /// use tokio_postgres::types::ToSql; - /// use tokio_postgres::types::Type; /// use futures_util::{pin_mut, TryStreamExt}; + /// use tokio_postgres::types::Type; /// - /// let rows = client.query_typed( + /// let params: Vec<(String, Type)> = vec![ + /// ("first param".into(), Type::TEXT), + /// ("second param".into(), Type::TEXT), + /// ]; + /// let mut it = client.query_typed_raw( /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", - /// &[(&"first param", Type::TEXT), (&2i32, Type::INT4)], + /// params, /// ).await?; /// - /// for row in rows { - /// let foo: i32 = row.get("foo"); - /// println!("foo: {}", foo); + /// pin_mut!(it); + /// while let Some(row) = it.try_next().await? { + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); /// } /// # Ok(()) /// # } /// ``` - pub async fn query_typed( - &self, - statement: &str, - params: &[(&(dyn ToSql + Sync), Type)], - ) -> Result, Error> { - fn slice_iter<'a>( - s: &'a [(&'a (dyn ToSql + Sync), Type)], - ) -> impl ExactSizeIterator + 'a { - s.iter() - .map(|(param, param_type)| (*param as _, param_type.clone())) - } - - query::query_typed(&self.inner, statement, slice_iter(params)) - .await? - .try_collect() - .await + pub async fn query_typed_raw(&self, query: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator, + { + query::query_typed(&self.inner, query, params).await } /// Executes a statement, returning the number of rows modified. diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index b91d78064..6e7dffeb1 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -63,6 +63,12 @@ pub trait GenericClient: private::Sealed { params: &[(&(dyn ToSql + Sync), Type)], ) -> Result, Error>; + /// Like [`Client::query_typed_raw`] + async fn query_typed_raw(&self, statement: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator + Sync + Send; + /// Like [`Client::prepare`]. 
async fn prepare(&self, query: &str) -> Result; @@ -154,6 +160,14 @@ impl GenericClient for Client { self.query_typed(statement, params).await } + async fn query_typed_raw(&self, statement: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator + Sync + Send, + { + self.query_typed_raw(statement, params).await + } + async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } @@ -252,6 +266,14 @@ impl GenericClient for Transaction<'_> { self.query_typed(statement, params).await } + async fn query_typed_raw(&self, statement: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator + Sync + Send, + { + self.query_typed_raw(statement, params).await + } + async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index e304bbaea..be42d66b6 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -69,29 +69,21 @@ pub async fn query_typed<'a, P, I>( where P: BorrowToSql, I: IntoIterator, - I::IntoIter: ExactSizeIterator, { - let (params, param_types): (Vec<_>, Vec<_>) = params.into_iter().unzip(); - - let params = params.into_iter(); - - let param_oids = param_types.iter().map(|t| t.oid()).collect::>(); - - let params = params.into_iter(); - - let buf = client.with_buf(|buf| { - frontend::parse("", query, param_oids.into_iter(), buf).map_err(Error::parse)?; - - encode_bind_with_statement_name_and_param_types("", ¶m_types, params, "", buf)?; - - frontend::describe(b'S', "", buf).map_err(Error::encode)?; - - frontend::execute("", 0, buf).map_err(Error::encode)?; + let buf = { + let params = params.into_iter().collect::>(); + let param_oids = params.iter().map(|(_, t)| t.oid()).collect::>(); - frontend::sync(buf); + client.with_buf(|buf| { + frontend::parse("", query, param_oids.into_iter(), buf).map_err(Error::parse)?; + encode_bind_raw("", params, "", buf)?; + frontend::describe(b'S', "", buf).map_err(Error::encode)?; + frontend::execute("", 0, buf).map_err(Error::encode)?; + frontend::sync(buf); - Ok(buf.split().freeze()) - })?; + Ok(buf.split().freeze()) + })? 
+ }; let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; @@ -233,47 +225,42 @@ where I: IntoIterator, I::IntoIter: ExactSizeIterator, { - encode_bind_with_statement_name_and_param_types( + let params = params.into_iter(); + if params.len() != statement.params().len() { + return Err(Error::parameters(params.len(), statement.params().len())); + } + + encode_bind_raw( statement.name(), - statement.params(), - params, + params.zip(statement.params().iter().cloned()), portal, buf, ) } -fn encode_bind_with_statement_name_and_param_types( +fn encode_bind_raw( statement_name: &str, - param_types: &[Type], params: I, portal: &str, buf: &mut BytesMut, ) -> Result<(), Error> where P: BorrowToSql, - I: IntoIterator, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let params = params.into_iter(); - - if param_types.len() != params.len() { - return Err(Error::parameters(params.len(), param_types.len())); - } - let (param_formats, params): (Vec<_>, Vec<_>) = params - .zip(param_types.iter()) - .map(|(p, ty)| (p.borrow_to_sql().encode_format(ty) as i16, p)) + .into_iter() + .map(|(p, ty)| (p.borrow_to_sql().encode_format(&ty) as i16, (p, ty))) .unzip(); - let params = params.into_iter(); - let mut error_idx = 0; let r = frontend::bind( portal, statement_name, param_formats, - params.zip(param_types).enumerate(), - |(idx, (param, ty)), buf| match param.borrow_to_sql().to_sql_checked(ty, buf) { + params.into_iter().enumerate(), + |(idx, (param, ty)), buf| match param.borrow_to_sql().to_sql_checked(&ty, buf) { Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), Err(e) => { diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index 3e62b2ac7..17a50b60f 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -149,6 +149,24 @@ impl<'a> Transaction<'a> { self.client.query_raw(statement, params).await } + /// Like `Client::query_typed`. + pub async fn query_typed( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.client.query_typed(statement, params).await + } + + /// Like `Client::query_typed_raw`. + pub async fn query_typed_raw(&self, query: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator, + { + self.client.query_typed_raw(query, params).await + } + /// Like `Client::execute`. pub async fn execute( &self, @@ -227,15 +245,6 @@ impl<'a> Transaction<'a> { query::query_portal(self.client.inner(), portal, max_rows).await } - /// Like `Client::query_typed`. - pub async fn query_typed( - &self, - statement: &str, - params: &[(&(dyn ToSql + Sync), Type)], - ) -> Result, Error> { - self.client.query_typed(statement, params).await - } - /// Like `Client::copy_in`. 
pub async fn copy_in(&self, statement: &T) -> Result, Error> where From a0b2d701ebee8fd5c5b3d6ee5cf0cde5d7f36a65 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Jul 2024 20:04:35 -0400 Subject: [PATCH 753/819] Fix cancellation of TransactionBuilder::start --- tokio-postgres/src/client.rs | 42 ++--------------------- tokio-postgres/src/transaction_builder.rs | 40 +++++++++++++++++++-- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index b04f05f88..92eabde36 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,4 +1,4 @@ -use crate::codec::{BackendMessages, FrontendMessage}; +use crate::codec::BackendMessages; use crate::config::SslMode; use crate::connection::{Request, RequestMessages}; use crate::copy_out::CopyOutStream; @@ -21,7 +21,7 @@ use fallible_iterator::FallibleIterator; use futures_channel::mpsc; use futures_util::{future, pin_mut, ready, StreamExt, TryStreamExt}; use parking_lot::Mutex; -use postgres_protocol::message::{backend::Message, frontend}; +use postgres_protocol::message::backend::Message; use postgres_types::BorrowToSql; use std::collections::HashMap; use std::fmt; @@ -532,43 +532,7 @@ impl Client { /// /// The transaction will roll back by default - use the `commit` method to commit it. pub async fn transaction(&mut self) -> Result, Error> { - struct RollbackIfNotDone<'me> { - client: &'me Client, - done: bool, - } - - impl<'a> Drop for RollbackIfNotDone<'a> { - fn drop(&mut self) { - if self.done { - return; - } - - let buf = self.client.inner().with_buf(|buf| { - frontend::query("ROLLBACK", buf).unwrap(); - buf.split().freeze() - }); - let _ = self - .client - .inner() - .send(RequestMessages::Single(FrontendMessage::Raw(buf))); - } - } - - // This is done, as `Future` created by this method can be dropped after - // `RequestMessages` is synchronously send to the `Connection` by - // `batch_execute()`, but before `Responses` is asynchronously polled to - // completion. In that case `Transaction` won't be created and thus - // won't be rolled back. - { - let mut cleaner = RollbackIfNotDone { - client: self, - done: false, - }; - self.batch_execute("BEGIN").await?; - cleaner.done = true; - } - - Ok(Transaction::new(self)) + self.build_transaction().start().await } /// Returns a builder for a transaction with custom settings. diff --git a/tokio-postgres/src/transaction_builder.rs b/tokio-postgres/src/transaction_builder.rs index 9718ac588..93e9e9801 100644 --- a/tokio-postgres/src/transaction_builder.rs +++ b/tokio-postgres/src/transaction_builder.rs @@ -1,4 +1,6 @@ -use crate::{Client, Error, Transaction}; +use postgres_protocol::message::frontend; + +use crate::{codec::FrontendMessage, connection::RequestMessages, Client, Error, Transaction}; /// The isolation level of a database transaction. 
#[derive(Debug, Copy, Clone)] @@ -106,7 +108,41 @@ impl<'a> TransactionBuilder<'a> { query.push_str(s); } - self.client.batch_execute(&query).await?; + struct RollbackIfNotDone<'me> { + client: &'me Client, + done: bool, + } + + impl<'a> Drop for RollbackIfNotDone<'a> { + fn drop(&mut self) { + if self.done { + return; + } + + let buf = self.client.inner().with_buf(|buf| { + frontend::query("ROLLBACK", buf).unwrap(); + buf.split().freeze() + }); + let _ = self + .client + .inner() + .send(RequestMessages::Single(FrontendMessage::Raw(buf))); + } + } + + // This is done as `Future` created by this method can be dropped after + // `RequestMessages` is synchronously send to the `Connection` by + // `batch_execute()`, but before `Responses` is asynchronously polled to + // completion. In that case `Transaction` won't be created and thus + // won't be rolled back. + { + let mut cleaner = RollbackIfNotDone { + client: self.client, + done: false, + }; + self.client.batch_execute(&query).await?; + cleaner.done = true; + } Ok(Transaction::new(self.client)) } From c3580774fcdc4597dac81e1128ef8bef1e6ff3a7 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Jul 2024 20:23:50 -0400 Subject: [PATCH 754/819] Release postgres-protocol v0.6.7 --- postgres-protocol/CHANGELOG.md | 17 ++++++++++++++++- postgres-protocol/Cargo.toml | 2 +- postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 4 files changed, 19 insertions(+), 4 deletions(-) diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index 1c371675c..54dce91b0 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,6 +1,21 @@ # Change Log -## v0.6.6 -2023-08-19 +## v0.6.7 - 2024-07-21 + +### Deprecated + +* Deprecated `ErrorField::value`. + +### Added + +* Added a `Clone` implementation for `DataRowBody`. +* Added `ErrorField::value_bytes`. + +### Changed + +* Upgraded `base64`. 
+ +## v0.6.6 - 2023-08-19 ### Added diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index a8a130495..49cf2d59c 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-protocol" -version = "0.6.6" +version = "0.6.7" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 33296db2c..984fd186f 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -31,7 +31,7 @@ with-time-0_3 = ["time-03"] [dependencies] bytes = "1.0" fallible-iterator = "0.2" -postgres-protocol = { version = "0.6.5", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.7", path = "../postgres-protocol" } postgres-derive = { version = "0.4.5", optional = true, path = "../postgres-derive" } array-init = { version = "2", optional = true } diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 2e080cfb2..92f4ee696 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -54,7 +54,7 @@ parking_lot = "0.12" percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.11" -postgres-protocol = { version = "0.6.6", path = "../postgres-protocol" } +postgres-protocol = { version = "0.6.7", path = "../postgres-protocol" } postgres-types = { version = "0.2.5", path = "../postgres-types" } tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } From 6b4566b132ca4a81c06eaf35eb63318a69360f48 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Jul 2024 20:28:22 -0400 Subject: [PATCH 755/819] Release postgres-types v0.2.7 --- postgres-types/CHANGELOG.md | 8 ++++++++ postgres-types/Cargo.toml | 2 +- tokio-postgres/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 157a2cc7d..1e5cd31d8 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -2,9 +2,17 @@ ## Unreleased +## v0.2.7 - 2024-07-21 + +### Added + +* Added `Default` implementation for `Json`. +* Added a `js` feature for WASM compatibility. + ### Changed * `FromStr` implementation for `PgLsn` no longer allocates a `Vec` when splitting an lsn string on it's `/`. +* The `eui48-1` feature no longer enables default features of the `eui48` library. 
## v0.2.6 - 2023-08-19 diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index 984fd186f..e2d21b358 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres-types" -version = "0.2.6" +version = "0.2.7" authors = ["Steven Fackler "] edition = "2018" license = "MIT OR Apache-2.0" diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index 92f4ee696..f762b1184 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -55,7 +55,7 @@ percent-encoding = "2.0" pin-project-lite = "0.2" phf = "0.11" postgres-protocol = { version = "0.6.7", path = "../postgres-protocol" } -postgres-types = { version = "0.2.5", path = "../postgres-types" } +postgres-types = { version = "0.2.7", path = "../postgres-types" } tokio = { version = "1.27", features = ["io-util"] } tokio-util = { version = "0.7", features = ["codec"] } rand = "0.8.5" From 92266188e8fd081be8e29d425b9fd334d2039196 Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Jul 2024 20:36:18 -0400 Subject: [PATCH 756/819] Release tokio-postgres v0.7.11 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/Cargo.toml | 2 +- tokio-postgres/CHANGELOG.md | 24 +++++++++++++++++++++--- tokio-postgres/Cargo.toml | 2 +- 5 files changed, 25 insertions(+), 7 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 936eeeaa4..6c17d0889 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -19,7 +19,7 @@ runtime = ["tokio-postgres/runtime"] native-tls = "0.2" tokio = "1.0" tokio-native-tls = "0.3" -tokio-postgres = { version = "0.7.0", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.7.11", path = "../tokio-postgres", default-features = false } [dev-dependencies] futures-util = "0.3" diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index b7ebd3385..7c19070bf 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -19,7 +19,7 @@ runtime = ["tokio-postgres/runtime"] openssl = "0.10" tokio = "1.0" tokio-openssl = "0.6" -tokio-postgres = { version = "0.7.0", path = "../tokio-postgres", default-features = false } +tokio-postgres = { version = "0.7.11", path = "../tokio-postgres", default-features = false } [dev-dependencies] futures-util = "0.3" diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index 2ff3c875e..f1dc3c685 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -40,7 +40,7 @@ bytes = "1.0" fallible-iterator = "0.2" futures-util = { version = "0.3.14", features = ["sink"] } log = "0.4" -tokio-postgres = { version = "0.7.10", path = "../tokio-postgres" } +tokio-postgres = { version = "0.7.11", path = "../tokio-postgres" } tokio = { version = "1.0", features = ["rt", "time"] } [dev-dependencies] diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index 775c22e34..e0be26296 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -2,10 +2,28 @@ ## Unreleased +## v0.7.11 - 2024-07-21 + +### Fixed + +* Fixed handling of non-UTF8 error fields which can be sent after failed handshakes. +* Fixed cancellation handling of `TransactionBuilder::start` futures. + +### Added + +* Added `table_oid` and `field_id` fields to `Columns` struct of prepared statements. +* Added `GenericClient::simple_query`. +* Added `#[track_caller]` to `Row::get` and `SimpleQueryRow::get`. +* Added `TargetSessionAttrs::ReadOnly`. 
+* Added `Debug` implementation for `Statement`. +* Added `Clone` implementation for `Row`. +* Added `SimpleQueryMessage::RowDescription`. +* Added `{Client, Transaction, GenericClient}::query_typed`. + +### Changed + * Disable `rustc-serialize` compatibility of `eui48-1` dependency -* Remove tests for `eui48-04` -* Add `table_oid` and `field_id` fields to `Columns` struct of prepared statements. -* Add `GenericClient::simple_query`. +* Config setters now take `impl Into`. ## v0.7.10 - 2023-08-25 diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index f762b1184..c2f80dc7e 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tokio-postgres" -version = "0.7.10" +version = "0.7.11" authors = ["Steven Fackler "] edition = "2018" license = "MIT OR Apache-2.0" From 9f196e7f5ba6067efe55f758d743cdfd9b606cff Mon Sep 17 00:00:00 2001 From: Steven Fackler Date: Sun, 21 Jul 2024 20:38:52 -0400 Subject: [PATCH 757/819] Release postgres v0.19.8 --- postgres-native-tls/Cargo.toml | 2 +- postgres-openssl/Cargo.toml | 2 +- postgres/CHANGELOG.md | 6 ++++++ postgres/Cargo.toml | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index 6c17d0889..02259b3dc 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "0.7.11", path = "../tokio-postgres", default-featu [dev-dependencies] futures-util = "0.3" tokio = { version = "1.0", features = ["macros", "net", "rt"] } -postgres = { version = "0.19.0", path = "../postgres" } +postgres = { version = "0.19.8", path = "../postgres" } diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index 7c19070bf..9013384a2 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -24,4 +24,4 @@ tokio-postgres = { version = "0.7.11", path = "../tokio-postgres", default-featu [dev-dependencies] futures-util = "0.3" tokio = { version = "1.0", features = ["macros", "net", "rt"] } -postgres = { version = "0.19.0", path = "../postgres" } +postgres = { version = "0.19.8", path = "../postgres" } diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 7f856b5ac..258cdb518 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## v0.19.8 - 2024-07-21 + +### Added + +* Added `{Client, Transaction, GenericClient}::query_typed`. 
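As a rough illustration of the `query_typed` addition noted above, the blocking sketch below assumes the `postgres` wrapper mirrors the `(value, Type)` parameter shape used by the tokio-postgres tests later in this series; the connection string and query are placeholders.

```rust
use postgres::types::Type;
use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    // Placeholder connection parameters; point these at a real server.
    let mut client = Client::connect("host=localhost user=postgres", NoTls)?;

    // Each parameter carries an explicit Postgres type, so the query can run
    // without a separate prepare step to infer parameter types.
    let rows = client.query_typed(
        "SELECT $1 + 1, $2",
        &[(&41i32, Type::INT4), (&"hello", Type::TEXT)],
    )?;

    let sum: i32 = rows[0].get(0);
    let label: &str = rows[0].get(1);
    assert_eq!(sum, 42);
    assert_eq!(label, "hello");
    Ok(())
}
```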
+ ## v0.19.7 - 2023-08-25 ## Fixed diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index f1dc3c685..ff95c4f14 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "postgres" -version = "0.19.7" +version = "0.19.8" authors = ["Steven Fackler "] edition = "2018" license = "MIT OR Apache-2.0" From 6de0fceebe3c1800a5e812f35449e464fbc33f55 Mon Sep 17 00:00:00 2001 From: Allan Zhang Date: Mon, 22 Jul 2024 15:54:40 -0400 Subject: [PATCH 758/819] Add jiff support --- postgres-types/CHANGELOG.md | 4 ++ postgres-types/Cargo.toml | 2 + postgres-types/src/jiff_01.rs | 118 ++++++++++++++++++++++++++++++++++ postgres-types/src/lib.rs | 7 ++ postgres/CHANGELOG.md | 6 ++ postgres/Cargo.toml | 1 + tokio-postgres/CHANGELOG.md | 4 ++ tokio-postgres/Cargo.toml | 2 + tokio-postgres/src/lib.rs | 1 + 9 files changed, 145 insertions(+) create mode 100644 postgres-types/src/jiff_01.rs diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index 1e5cd31d8..b11e18d32 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -2,6 +2,10 @@ ## Unreleased +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. + ## v0.2.7 - 2024-07-21 ### Added diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index e2d21b358..941f4fcc4 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -21,6 +21,7 @@ with-eui48-0_4 = ["eui48-04"] with-eui48-1 = ["eui48-1"] with-geo-types-0_6 = ["geo-types-06"] with-geo-types-0_7 = ["geo-types-0_7"] +with-jiff-0_1 = ["jiff-01"] with-serde_json-1 = ["serde-1", "serde_json-1"] with-smol_str-01 = ["smol_str-01"] with-uuid-0_8 = ["uuid-08"] @@ -46,6 +47,7 @@ eui48-04 = { version = "0.4", package = "eui48", optional = true } eui48-1 = { version = "1.0", package = "eui48", optional = true, default-features = false } geo-types-06 = { version = "0.6", package = "geo-types", optional = true } geo-types-0_7 = { version = "0.7", package = "geo-types", optional = true } +jiff-01 = { version = "0.1", package = "jiff", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs new file mode 100644 index 000000000..eec6aa2f8 --- /dev/null +++ b/postgres-types/src/jiff_01.rs @@ -0,0 +1,118 @@ +use bytes::BytesMut; +use jiff_01::{ + civil::{Date, DateTime, Time}, + tz::TimeZone, + Span, Timestamp as JiffTimestamp, Zoned, +}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +const fn base() -> DateTime { + DateTime::constant(2000, 1, 1, 0, 0, 0, 0) +} + +impl<'a> FromSql<'a> for DateTime { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let t = types::timestamp_from_sql(raw)?; + Ok(base().checked_add(Span::new().microseconds(t))?) + } + + accepts!(TIMESTAMP); +} + +impl ToSql for DateTime { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::timestamp_to_sql(self.since(base())?.get_microseconds(), w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMP); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for JiffTimestamp { + fn from_sql(type_: &Type, raw: &[u8]) -> Result> { + Ok(DateTime::from_sql(type_, raw)? + .to_zoned(TimeZone::UTC)? 
+ .timestamp()) + } + + accepts!(TIMESTAMPTZ); +} + +impl ToSql for JiffTimestamp { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::timestamp_to_sql( + self.since(base().to_zoned(TimeZone::UTC)?)? + .get_microseconds(), + w, + ); + Ok(IsNull::No) + } + + accepts!(TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Zoned { + fn from_sql(type_: &Type, raw: &[u8]) -> Result> { + Ok(JiffTimestamp::from_sql(type_, raw)?.to_zoned(TimeZone::UTC)) + } + + accepts!(TIMESTAMPTZ); +} + +impl ToSql for Zoned { + fn to_sql( + &self, + type_: &Type, + w: &mut BytesMut, + ) -> Result> { + self.timestamp().to_sql(type_, w) + } + + accepts!(TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Date { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let jd = types::date_from_sql(raw)?; + Ok(base().date().checked_add(Span::new().days(jd))?) + } + + accepts!(DATE); +} + +impl ToSql for Date { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let jd = self.since(base().date())?.get_days(); + types::date_to_sql(jd, w); + Ok(IsNull::No) + } + + accepts!(DATE); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Time { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let usec = types::time_from_sql(raw)?; + Ok(Time::midnight() + Span::new().microseconds(usec)) + } + + accepts!(TIME); +} + +impl ToSql for Time { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let delta = self.since(Time::midnight())?; + types::time_to_sql(delta.get_microseconds(), w); + Ok(IsNull::No) + } + + accepts!(TIME); + to_sql_checked!(); +} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 492039766..7d6d976c6 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -276,6 +276,8 @@ mod eui48_1; mod geo_types_06; #[cfg(feature = "with-geo-types-0_7")] mod geo_types_07; +#[cfg(feature = "with-jiff-0_1")] +mod jiff_01; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; #[cfg(feature = "with-smol_str-01")] @@ -491,6 +493,11 @@ impl WrongType { /// | `time::OffsetDateTime` | TIMESTAMP WITH TIME ZONE | /// | `time::Date` | DATE | /// | `time::Time` | TIME | +/// | `jiff::civil::DateTime` | TIMESTAMP | +/// | `jiff::Timestamp` | TIMESTAMP WITH TIME ZONE | +/// | `jiff::Zoned` | TIMESTAMP WITH TIME ZONE | +/// | `jiff::civil::Date` | DATE | +/// | `jiff::civil::Time` | TIME | /// | `eui48::MacAddress` | MACADDR | /// | `geo_types::Point` | POINT | /// | `geo_types::Rect` | BOX | diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 258cdb518..6feb629e4 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,11 @@ # Change Log +## Unreleased + +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. 
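To show the new `jiff` support end to end, here is a rough usage sketch with the async client. It assumes `tokio-postgres` is built with the `with-jiff-0_1` feature, tokio is available with its `macros` and `rt-multi-thread` features, and a server is reachable; the connection string and query are placeholders.

```rust
use jiff::civil::Date;
use tokio_postgres::NoTls;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder connection parameters.
    let (client, connection) =
        tokio_postgres::connect("host=localhost user=postgres", NoTls).await?;

    // Drive the connection in the background, as in the tokio-postgres docs.
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("connection error: {}", e);
        }
    });

    // Round-trip a jiff civil date through a DATE value.
    let day: Date = "2024-07-22".parse()?;
    let row = client.query_one("SELECT $1::DATE", &[&day]).await?;
    let round_trip: Date = row.get(0);
    assert_eq!(round_trip, day);
    Ok(())
}
```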
+ ## v0.19.8 - 2024-07-21 ### Added diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index ff95c4f14..e0e580f7d 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -28,6 +28,7 @@ with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] with-eui48-1 = ["tokio-postgres/with-eui48-1"] with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] +with-jiff-0_1 = ["tokio-postgres/with-jiff-0_1"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] with-smol_str-01 = ["tokio-postgres/with-smol_str-01"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index e0be26296..bf17ec486 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -2,6 +2,10 @@ ## Unreleased +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. + ## v0.7.11 - 2024-07-21 ### Fixed diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index c2f80dc7e..e1e84f7b1 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -34,6 +34,7 @@ with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] with-eui48-1 = ["postgres-types/with-eui48-1"] with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] +with-jiff-0_1 = ["postgres-types/with-jiff-0_1"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] with-smol_str-01 = ["postgres-types/with-smol_str-01"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] @@ -81,6 +82,7 @@ chrono-04 = { version = "0.4", package = "chrono", default-features = false } eui48-1 = { version = "1.0", package = "eui48", default-features = false } geo-types-06 = { version = "0.6", package = "geo-types" } geo-types-07 = { version = "0.7", package = "geo-types" } +jiff-01 = { version = "0.1", package = "jiff" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } smol_str-01 = { version = "0.1", package = "smol_str" } diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index a603158fb..ec843d511 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -111,6 +111,7 @@ //! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | //! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | //! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | +//! | `with-jiff-0_1` | Enable support for the 0.1 version of the `jiff` crate. | [jiff](https://crates.io/crates/jiff/0.1.0) 0.1 | no | //! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | //! | `with-uuid-1` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 1.0 | no | From 0fc4005ed31e3705a04cb7e58eb220d89b922dd0 Mon Sep 17 00:00:00 2001 From: Ramnivas Laddad Date: Mon, 22 Jul 2024 15:07:44 -0700 Subject: [PATCH 759/819] For `query_typed`, deal with the no-data case. If a query returns no data, we receive `Message::NoData`, which signals the completion of the query. 
However, we treated it as a no-op, leading to processing other messages and eventual failure. This PR fixes the issue and updates the `query_typed` tests to cover this scenario. --- tokio-postgres/src/query.rs | 13 +++++++++---- tokio-postgres/tests/test/main.rs | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index be42d66b6..3ab002871 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -89,10 +89,15 @@ where loop { match responses.next().await? { - Message::ParseComplete - | Message::BindComplete - | Message::ParameterDescription(_) - | Message::NoData => {} + Message::ParseComplete | Message::BindComplete | Message::ParameterDescription(_) => {} + Message::NoData => { + return Ok(RowStream { + statement: Statement::unnamed(vec![], vec![]), + responses, + rows_affected: None, + _p: PhantomPinned, + }); + } Message::RowDescription(row_description) => { let mut columns: Vec = vec![]; let mut it = row_description.fields(); diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index 84c46d101..9a6aa26fe 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -997,6 +997,13 @@ async fn query_typed_no_transaction() { assert_eq!(second_row.get::<_, i32>(1), 40); assert_eq!(second_row.get::<_, &str>(2), "literal"); assert_eq!(second_row.get::<_, i32>(3), 5); + + // Test for UPDATE that returns no data + let updated_rows = client + .query_typed("UPDATE foo set age = 33", &[]) + .await + .unwrap(); + assert_eq!(updated_rows.len(), 0); } #[tokio::test] @@ -1064,4 +1071,11 @@ async fn query_typed_with_transaction() { assert_eq!(second_row.get::<_, i32>(1), 40); assert_eq!(second_row.get::<_, &str>(2), "literal"); assert_eq!(second_row.get::<_, i32>(3), 5); + + // Test for UPDATE that returns no data + let updated_rows = transaction + .query_typed("UPDATE foo set age = 33", &[]) + .await + .unwrap(); + assert_eq!(updated_rows.len(), 0); } From aa10f0d75cb23757c9a87fe58363e4e26ae19d1e Mon Sep 17 00:00:00 2001 From: Qiu Chaofan Date: Tue, 23 Jul 2024 13:36:51 +0800 Subject: [PATCH 760/819] Support AIX keepalive --- tokio-postgres/src/keepalive.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio-postgres/src/keepalive.rs b/tokio-postgres/src/keepalive.rs index c409eb0ea..7bdd76341 100644 --- a/tokio-postgres/src/keepalive.rs +++ b/tokio-postgres/src/keepalive.rs @@ -12,12 +12,18 @@ impl From<&KeepaliveConfig> for TcpKeepalive { fn from(keepalive_config: &KeepaliveConfig) -> Self { let mut tcp_keepalive = Self::new().with_time(keepalive_config.idle); - #[cfg(not(any(target_os = "redox", target_os = "solaris", target_os = "openbsd")))] + #[cfg(not(any( + target_os = "aix", + target_os = "redox", + target_os = "solaris", + target_os = "openbsd" + )))] if let Some(interval) = keepalive_config.interval { tcp_keepalive = tcp_keepalive.with_interval(interval); } #[cfg(not(any( + target_os = "aix", target_os = "redox", target_os = "solaris", target_os = "windows", From df2f37d848f5779ed1dc6c1a8f8ded32a15e70c3 Mon Sep 17 00:00:00 2001 From: Allan Zhang Date: Tue, 23 Jul 2024 07:54:19 -0400 Subject: [PATCH 761/819] Remove unecessary alias for Timestamp --- postgres-types/src/jiff_01.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs index eec6aa2f8..c2e4ef06e 100644 --- a/postgres-types/src/jiff_01.rs +++ 
b/postgres-types/src/jiff_01.rs @@ -2,7 +2,7 @@ use bytes::BytesMut; use jiff_01::{ civil::{Date, DateTime, Time}, tz::TimeZone, - Span, Timestamp as JiffTimestamp, Zoned, + Span, Timestamp, Zoned, }; use postgres_protocol::types; use std::error::Error; @@ -32,8 +32,8 @@ impl ToSql for DateTime { to_sql_checked!(); } -impl<'a> FromSql<'a> for JiffTimestamp { - fn from_sql(type_: &Type, raw: &[u8]) -> Result> { +impl<'a> FromSql<'a> for Timestamp { + fn from_sql(type_: &Type, raw: &[u8]) -> Result> { Ok(DateTime::from_sql(type_, raw)? .to_zoned(TimeZone::UTC)? .timestamp()) @@ -42,7 +42,7 @@ impl<'a> FromSql<'a> for JiffTimestamp { accepts!(TIMESTAMPTZ); } -impl ToSql for JiffTimestamp { +impl ToSql for Timestamp { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { types::timestamp_to_sql( self.since(base().to_zoned(TimeZone::UTC)?)? @@ -58,7 +58,7 @@ impl ToSql for JiffTimestamp { impl<'a> FromSql<'a> for Zoned { fn from_sql(type_: &Type, raw: &[u8]) -> Result> { - Ok(JiffTimestamp::from_sql(type_, raw)?.to_zoned(TimeZone::UTC)) + Ok(Timestamp::from_sql(type_, raw)?.to_zoned(TimeZone::UTC)) } accepts!(TIMESTAMPTZ); From f00d208959c8c76c9bbe943f53a9a261ef1d2315 Mon Sep 17 00:00:00 2001 From: Allan Zhang Date: Tue, 23 Jul 2024 07:56:00 -0400 Subject: [PATCH 762/819] Update impl for Timestamp The impl now directly computes `Timestamp` rather than going through `DateTime` and `Zoned`. --- postgres-types/src/jiff_01.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs index c2e4ef06e..d8f8723b6 100644 --- a/postgres-types/src/jiff_01.rs +++ b/postgres-types/src/jiff_01.rs @@ -13,6 +13,13 @@ const fn base() -> DateTime { DateTime::constant(2000, 1, 1, 0, 0, 0, 0) } +/// The number of seconds from 2000-01-01 00:00:00 UTC to the Unix epoch. +const Y2K_EPOCH: i64 = 946684800; + +fn base_ts() -> Timestamp { + Timestamp::new(Y2K_EPOCH, 0).unwrap() +} + impl<'a> FromSql<'a> for DateTime { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let t = types::timestamp_from_sql(raw)?; @@ -33,10 +40,9 @@ impl ToSql for DateTime { } impl<'a> FromSql<'a> for Timestamp { - fn from_sql(type_: &Type, raw: &[u8]) -> Result> { - Ok(DateTime::from_sql(type_, raw)? - .to_zoned(TimeZone::UTC)? - .timestamp()) + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let t = types::timestamp_from_sql(raw)?; + Ok(base_ts().checked_add(Span::new().microseconds(t))?) } accepts!(TIMESTAMPTZ); @@ -44,11 +50,7 @@ impl<'a> FromSql<'a> for Timestamp { impl ToSql for Timestamp { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - types::timestamp_to_sql( - self.since(base().to_zoned(TimeZone::UTC)?)? - .get_microseconds(), - w, - ); + types::timestamp_to_sql(self.since(base_ts())?.get_microseconds(), w); Ok(IsNull::No) } From 815a5d3ae9a580dcc6db3312c8945417eac680f2 Mon Sep 17 00:00:00 2001 From: Allan Zhang Date: Tue, 23 Jul 2024 07:58:47 -0400 Subject: [PATCH 763/819] Remove impl for `Zoned` `Timestamp` already has impl and is semantically accurate for mapping to `timestamptz`, unlike `Zoned`. End users can do their own conversions from `Timestamp` to `Zoned` if desired. 
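A brief sketch of the conversion this change leaves to callers: attach a `TimeZone` to the `Timestamp` decoded from a `TIMESTAMPTZ` column. The literal instant below stands in for a value read from a row; UTC is used for simplicity, and any other `TimeZone` works the same way.

```rust
use jiff::tz::TimeZone;
use jiff::{Timestamp, Zoned};

fn main() {
    // Stand-in for a value decoded from a TIMESTAMPTZ column via FromSql.
    let ts: Timestamp = "2024-07-22T15:54:40Z"
        .parse()
        .expect("valid RFC 3339 instant");

    // The driver no longer picks a zone; the caller does it explicitly.
    let zoned: Zoned = ts.to_zoned(TimeZone::UTC);
    println!("{}", zoned);
}
```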
--- postgres-types/src/jiff_01.rs | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs index d8f8723b6..8a0a38f7c 100644 --- a/postgres-types/src/jiff_01.rs +++ b/postgres-types/src/jiff_01.rs @@ -58,27 +58,6 @@ impl ToSql for Timestamp { to_sql_checked!(); } -impl<'a> FromSql<'a> for Zoned { - fn from_sql(type_: &Type, raw: &[u8]) -> Result> { - Ok(Timestamp::from_sql(type_, raw)?.to_zoned(TimeZone::UTC)) - } - - accepts!(TIMESTAMPTZ); -} - -impl ToSql for Zoned { - fn to_sql( - &self, - type_: &Type, - w: &mut BytesMut, - ) -> Result> { - self.timestamp().to_sql(type_, w) - } - - accepts!(TIMESTAMPTZ); - to_sql_checked!(); -} - impl<'a> FromSql<'a> for Date { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let jd = types::date_from_sql(raw)?; From e19b3dc164ba0cc4f1e601149dc7e7b2837e7276 Mon Sep 17 00:00:00 2001 From: Allan Zhang Date: Wed, 14 Aug 2024 09:00:42 -0400 Subject: [PATCH 764/819] Rename PG_EPOCH --- postgres-types/src/jiff_01.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs index 8a0a38f7c..871d35f62 100644 --- a/postgres-types/src/jiff_01.rs +++ b/postgres-types/src/jiff_01.rs @@ -1,8 +1,7 @@ use bytes::BytesMut; use jiff_01::{ civil::{Date, DateTime, Time}, - tz::TimeZone, - Span, Timestamp, Zoned, + Span, Timestamp, }; use postgres_protocol::types; use std::error::Error; @@ -13,11 +12,11 @@ const fn base() -> DateTime { DateTime::constant(2000, 1, 1, 0, 0, 0, 0) } -/// The number of seconds from 2000-01-01 00:00:00 UTC to the Unix epoch. -const Y2K_EPOCH: i64 = 946684800; +/// The number of seconds from the Unix epoch to 2000-01-01 00:00:00 UTC. +const PG_EPOCH: i64 = 946684800; fn base_ts() -> Timestamp { - Timestamp::new(Y2K_EPOCH, 0).unwrap() + Timestamp::new(PG_EPOCH, 0).unwrap() } impl<'a> FromSql<'a> for DateTime { From c96342d7f6e1b86db752e96482ad372024062fab Mon Sep 17 00:00:00 2001 From: Allan Zhang Date: Wed, 14 Aug 2024 09:14:41 -0400 Subject: [PATCH 765/819] Fix ToSql This sets the smallest unit to microseconds when calculating time deltas. Previously, the number of microseconds was expressed improperly because the rounding was not set. --- postgres-types/src/jiff_01.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs index 871d35f62..54768c10d 100644 --- a/postgres-types/src/jiff_01.rs +++ b/postgres-types/src/jiff_01.rs @@ -1,7 +1,7 @@ use bytes::BytesMut; use jiff_01::{ civil::{Date, DateTime, Time}, - Span, Timestamp, + Span, SpanRound, Timestamp, Unit, }; use postgres_protocol::types; use std::error::Error; @@ -12,13 +12,17 @@ const fn base() -> DateTime { DateTime::constant(2000, 1, 1, 0, 0, 0, 0) } -/// The number of seconds from the Unix epoch to 2000-01-01 00:00:00 UTC. +/// The number of seconds from the Unix epoch to 2000-01-01 00:00:00 UTC. 
const PG_EPOCH: i64 = 946684800; fn base_ts() -> Timestamp { Timestamp::new(PG_EPOCH, 0).unwrap() } +fn round_us<'a>() -> SpanRound<'a> { + SpanRound::new().largest(Unit::Microsecond) +} + impl<'a> FromSql<'a> for DateTime { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let t = types::timestamp_from_sql(raw)?; @@ -30,7 +34,8 @@ impl<'a> FromSql<'a> for DateTime { impl ToSql for DateTime { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - types::timestamp_to_sql(self.since(base())?.get_microseconds(), w); + let span = self.since(base())?.round(round_us())?; + types::timestamp_to_sql(span.get_microseconds(), w); Ok(IsNull::No) } @@ -49,7 +54,8 @@ impl<'a> FromSql<'a> for Timestamp { impl ToSql for Timestamp { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - types::timestamp_to_sql(self.since(base_ts())?.get_microseconds(), w); + let span = self.since(base_ts())?.round(round_us())?; + types::timestamp_to_sql(span.get_microseconds(), w); Ok(IsNull::No) } @@ -88,8 +94,8 @@ impl<'a> FromSql<'a> for Time { impl ToSql for Time { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let delta = self.since(Time::midnight())?; - types::time_to_sql(delta.get_microseconds(), w); + let span = self.since(Time::midnight())?.round(round_us())?; + types::time_to_sql(span.get_microseconds(), w); Ok(IsNull::No) } From afef88efb6a2555cf0bed88bd93d5e48f42bffe9 Mon Sep 17 00:00:00 2001 From: Allan Zhang Date: Wed, 14 Aug 2024 20:39:55 -0400 Subject: [PATCH 766/819] Add jiff tests and overflow checks This adds tests in the same fashion as the existing ones for `chrono` and `time`. Overflow is now handled using fallible operations. For example, `Span:microseconds` is replaced with `Span::try_microseconds`. Postgres infinity values are workiing as expected. All tests are passing. --- postgres-types/src/jiff_01.rs | 71 +++++++-- tokio-postgres/tests/test/types/jiff_01.rs | 175 +++++++++++++++++++++ tokio-postgres/tests/test/types/mod.rs | 2 + 3 files changed, 231 insertions(+), 17 deletions(-) create mode 100644 tokio-postgres/tests/test/types/jiff_01.rs diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs index 54768c10d..d3215c0e6 100644 --- a/postgres-types/src/jiff_01.rs +++ b/postgres-types/src/jiff_01.rs @@ -23,10 +23,27 @@ fn round_us<'a>() -> SpanRound<'a> { SpanRound::new().largest(Unit::Microsecond) } +fn decode_err(_e: E) -> Box +where + E: Error, +{ + "value too large to decode".into() +} + +fn transmit_err(_e: E) -> Box +where + E: Error, +{ + "value too large to transmit".into() +} + impl<'a> FromSql<'a> for DateTime { fn from_sql(_: &Type, raw: &[u8]) -> Result> { - let t = types::timestamp_from_sql(raw)?; - Ok(base().checked_add(Span::new().microseconds(t))?) + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base().checked_add(s)) + .map_err(decode_err) } accepts!(TIMESTAMP); @@ -34,8 +51,12 @@ impl<'a> FromSql<'a> for DateTime { impl ToSql for DateTime { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let span = self.since(base())?.round(round_us())?; - types::timestamp_to_sql(span.get_microseconds(), w); + let v = self + .since(base()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? 
+ .get_microseconds(); + types::timestamp_to_sql(v, w); Ok(IsNull::No) } @@ -45,8 +66,11 @@ impl ToSql for DateTime { impl<'a> FromSql<'a> for Timestamp { fn from_sql(_: &Type, raw: &[u8]) -> Result> { - let t = types::timestamp_from_sql(raw)?; - Ok(base_ts().checked_add(Span::new().microseconds(t))?) + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base_ts().checked_add(s)) + .map_err(decode_err) } accepts!(TIMESTAMPTZ); @@ -54,8 +78,12 @@ impl<'a> FromSql<'a> for Timestamp { impl ToSql for Timestamp { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let span = self.since(base_ts())?.round(round_us())?; - types::timestamp_to_sql(span.get_microseconds(), w); + let v = self + .since(base_ts()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? + .get_microseconds(); + types::timestamp_to_sql(v, w); Ok(IsNull::No) } @@ -65,17 +93,19 @@ impl ToSql for Timestamp { impl<'a> FromSql<'a> for Date { fn from_sql(_: &Type, raw: &[u8]) -> Result> { - let jd = types::date_from_sql(raw)?; - Ok(base().date().checked_add(Span::new().days(jd))?) + let v = types::date_from_sql(raw)?; + Span::new() + .try_days(v) + .and_then(|s| base().date().checked_add(s)) + .map_err(decode_err) } - accepts!(DATE); } impl ToSql for Date { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let jd = self.since(base().date())?.get_days(); - types::date_to_sql(jd, w); + let v = self.since(base().date()).map_err(transmit_err)?.get_days(); + types::date_to_sql(v, w); Ok(IsNull::No) } @@ -85,8 +115,11 @@ impl ToSql for Date { impl<'a> FromSql<'a> for Time { fn from_sql(_: &Type, raw: &[u8]) -> Result> { - let usec = types::time_from_sql(raw)?; - Ok(Time::midnight() + Span::new().microseconds(usec)) + let v = types::time_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| Time::midnight().checked_add(s)) + .map_err(decode_err) } accepts!(TIME); @@ -94,8 +127,12 @@ impl<'a> FromSql<'a> for Time { impl ToSql for Time { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let span = self.since(Time::midnight())?.round(round_us())?; - types::time_to_sql(span.get_microseconds(), w); + let v = self + .since(Time::midnight()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? 
+ .get_microseconds(); + types::time_to_sql(v, w); Ok(IsNull::No) } diff --git a/tokio-postgres/tests/test/types/jiff_01.rs b/tokio-postgres/tests/test/types/jiff_01.rs new file mode 100644 index 000000000..7c9052676 --- /dev/null +++ b/tokio-postgres/tests/test/types/jiff_01.rs @@ -0,0 +1,175 @@ +use jiff_01::{ + civil::{Date as JiffDate, DateTime, Time}, + Timestamp as JiffTimestamp, +}; +use std::fmt; +use tokio_postgres::{ + types::{Date, FromSqlOwned, Timestamp}, + Client, +}; + +use crate::connect; +use crate::types::test_type; + +#[tokio::test] +async fn test_datetime_params() { + fn make_check(s: &str) -> (Option, &str) { + (Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_datetime_params() { + fn make_check(s: &str) -> (Timestamp, &str) { + (Timestamp::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_timestamp_params() { + fn make_check(s: &str) -> (Option, &str) { + (Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_timestamp_params() { + fn make_check(s: &str) -> (Timestamp, &str) { + (Timestamp::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_date_params() { + fn make_check(s: &str) -> (Option, &str) { + (Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_date_params() { + fn make_check(s: &str) -> (Date, &str) { + (Date::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (Date::PosInfinity, "'infinity'"), + (Date::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_time_params() { + fn make_check(s: &str) -> (Option