From c97a10612521c7a6e7bcdc4a1d90539fc33a0938 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 23 Dec 2020 12:55:28 -0800 Subject: [PATCH 001/420] docs(lib): enable doc_cfg on docs.rs builds (#2372) --- .github/workflows/CI.yml | 2 +- src/lib.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 8b7be8c597..7aeceb3f10 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -132,4 +132,4 @@ jobs: uses: actions-rs/cargo@v1 with: command: rustdoc - args: --features full -- -D broken-intra-doc-links + args: --features full -- --cfg docsrs -D broken-intra-doc-links diff --git a/src/lib.rs b/src/lib.rs index fb1185a84d..0dadfab098 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,6 +4,7 @@ #![cfg_attr(test, deny(rust_2018_idioms))] #![cfg_attr(test, deny(warnings))] #![cfg_attr(all(test, feature = "nightly"), feature(test))] +#![cfg_attr(docsrs, feature(doc_cfg))] //! # hyper //! From 3d6bdbe85006e623eae83d7c3ce9a45680d6c2c8 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 23 Dec 2020 13:01:14 -0800 Subject: [PATCH 002/420] v0.14.1 --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86b3f6319f..29d29b39c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +### v0.14.1 (2020-12-23) + +* Fixes building documentation. + + ## v0.14.0 (2020-12-23) diff --git a/Cargo.toml b/Cargo.toml index ef9a09680c..88a231b12e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.0" # don't forget to update html_root_url +version = "0.14.1" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" diff --git a/src/lib.rs b/src/lib.rs index 0dadfab098..ed457ddeb0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,4 @@ -#![doc(html_root_url = "https://docs.rs/hyper/0.14.0")] +#![doc(html_root_url = "https://docs.rs/hyper/0.14.1")] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] From 73a59e5fc7ddedcb7cbd91e97b33385fde57aa10 Mon Sep 17 00:00:00 2001 From: Nikhil Benesch Date: Mon, 28 Dec 2020 18:50:28 -0500 Subject: [PATCH 003/420] feat(client): expose `connect` types without proto feature (#2377) Make it possible to refer to Connected, Connection, HttpConnector, etc. without enabling either of the http1/http2 features. This makes feature selection work better for downstream libraries like hyper-openssl, which don't want to commit to any particular protocol. Fix #2376. 
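
For illustration only (not part of this change): with the `connect` types exposed, a downstream connector crate can implement hyper's `Connection` trait while depending on hyper with just the `client` feature enabled, no `http1`/`http2` required. The sketch below assumes `hyper = { version = "0.14", default-features = false, features = ["client"] }`; `MaybeTlsStream` is a hypothetical stand-in for such a crate's stream type, not an API from this patch.

    // Minimal sketch of a downstream connector stream type.
    use hyper::client::connect::{Connected, Connection};

    // Hypothetical wrapper a TLS connector crate might define around its
    // plain and TLS transport streams.
    pub enum MaybeTlsStream<T> {
        Plain(T),
        Tls(T), // a real crate would hold its TLS stream type here
    }

    impl<T: Connection> Connection for MaybeTlsStream<T> {
        fn connected(&self) -> Connected {
            match self {
                // Delegate to the inner transport's connection metadata;
                // a TLS arm could also mark `.negotiated_h2()` after ALPN.
                MaybeTlsStream::Plain(s) => s.connected(),
                MaybeTlsStream::Tls(s) => s.connected(),
            }
        }
    }
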
--- src/client/client.rs | 1233 +++++++++++++++++++++++++++++++++++ src/client/connect/http.rs | 1 + src/client/connect/mod.rs | 22 +- src/client/mod.rs | 1237 +----------------------------------- src/client/pool.rs | 2 +- src/lib.rs | 10 +- src/proto/h1/io.rs | 2 + 7 files changed, 1268 insertions(+), 1239 deletions(-) create mode 100644 src/client/client.rs diff --git a/src/client/client.rs b/src/client/client.rs new file mode 100644 index 0000000000..3ab708471a --- /dev/null +++ b/src/client/client.rs @@ -0,0 +1,1233 @@ +use std::error::Error as StdError; +use std::fmt; +use std::mem; +use std::time::Duration; + +use futures_channel::oneshot; +use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; +use http::header::{HeaderValue, HOST}; +use http::uri::Scheme; +use http::{Method, Request, Response, Uri, Version}; + +use super::conn; +use super::connect::{self, sealed::Connect, Alpn, Connected, Connection}; +use super::pool::{self, Key as PoolKey, Pool, Poolable, Pooled, Reservation}; +#[cfg(feature = "tcp")] +use super::HttpConnector; +use crate::body::{Body, HttpBody}; +use crate::common::{exec::BoxSendFuture, lazy as hyper_lazy, task, Future, Lazy, Pin, Poll}; +use crate::rt::Executor; + +/// A Client to make outgoing HTTP requests. +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +pub struct Client { + config: Config, + conn_builder: conn::Builder, + connector: C, + pool: Pool>, +} + +#[derive(Clone, Copy, Debug)] +struct Config { + retry_canceled_requests: bool, + set_host: bool, + ver: Ver, +} + +/// A `Future` that will resolve to an HTTP Response. +/// +/// This is returned by `Client::request` (and `Client::get`). +#[must_use = "futures do nothing unless polled"] +pub struct ResponseFuture { + inner: Pin>> + Send>>, +} + +// ===== impl Client ===== + +#[cfg(feature = "tcp")] +impl Client { + /// Create a new Client with the default [config](Builder). + /// + /// # Note + /// + /// The default connector does **not** handle TLS. Speaking to `https` + /// destinations will require [configuring a connector that implements + /// TLS](https://hyper.rs/guides/client/configuration). + #[inline] + pub fn new() -> Client { + Builder::default().build_http() + } +} + +#[cfg(feature = "tcp")] +impl Default for Client { + fn default() -> Client { + Client::new() + } +} + +impl Client<(), Body> { + /// Create a builder to configure a new `Client`. + /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "runtime")] + /// # fn run () { + /// use std::time::Duration; + /// use hyper::Client; + /// + /// let client = Client::builder() + /// .pool_idle_timeout(Duration::from_secs(30)) + /// .http2_only(true) + /// .build_http(); + /// # let infer: Client<_, hyper::Body> = client; + /// # drop(infer); + /// # } + /// # fn main() {} + /// ``` + #[inline] + pub fn builder() -> Builder { + Builder::default() + } +} + +impl Client +where + C: Connect + Clone + Send + Sync + 'static, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + /// Send a `GET` request to the supplied `Uri`. + /// + /// # Note + /// + /// This requires that the `HttpBody` type have a `Default` implementation. + /// It *should* return an "empty" version of itself, such that + /// `HttpBody::is_end_stream` is `true`. 
+ /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "runtime")] + /// # fn run () { + /// use hyper::{Client, Uri}; + /// + /// let client = Client::new(); + /// + /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); + /// # } + /// # fn main() {} + /// ``` + pub fn get(&self, uri: Uri) -> ResponseFuture + where + B: Default, + { + let body = B::default(); + if !body.is_end_stream() { + warn!("default HttpBody used for get() does not return true for is_end_stream"); + } + + let mut req = Request::new(body); + *req.uri_mut() = uri; + self.request(req) + } + + /// Send a constructed `Request` using this `Client`. + /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "runtime")] + /// # fn run () { + /// use hyper::{Body, Client, Request}; + /// + /// let client = Client::new(); + /// + /// let req = Request::builder() + /// .method("POST") + /// .uri("http://httpin.org/post") + /// .body(Body::from("Hallo!")) + /// .expect("request builder"); + /// + /// let future = client.request(req); + /// # } + /// # fn main() {} + /// ``` + pub fn request(&self, mut req: Request) -> ResponseFuture { + let is_http_connect = req.method() == Method::CONNECT; + match req.version() { + Version::HTTP_11 => (), + Version::HTTP_10 => { + if is_http_connect { + warn!("CONNECT is not allowed for HTTP/1.0"); + return ResponseFuture::new(Box::new(future::err( + crate::Error::new_user_unsupported_request_method(), + ))); + } + } + other_h2 @ Version::HTTP_2 => { + if self.config.ver != Ver::Http2 { + return ResponseFuture::error_version(other_h2); + } + } + // completely unsupported HTTP version (like HTTP/0.9)! + other => return ResponseFuture::error_version(other), + }; + + let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { + Ok(s) => s, + Err(err) => { + return ResponseFuture::new(Box::new(future::err(err))); + } + }; + + ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key))) + } + + fn retryably_send_request( + &self, + req: Request, + pool_key: PoolKey, + ) -> impl Future>> { + let client = self.clone(); + let uri = req.uri().clone(); + + let mut send_fut = client.send_request(req, pool_key.clone()); + future::poll_fn(move |cx| loop { + match ready!(Pin::new(&mut send_fut).poll(cx)) { + Ok(resp) => return Poll::Ready(Ok(resp)), + Err(ClientError::Normal(err)) => return Poll::Ready(Err(err)), + Err(ClientError::Canceled { + connection_reused, + mut req, + reason, + }) => { + if !client.config.retry_canceled_requests || !connection_reused { + // if client disabled, don't retry + // a fresh connection means we definitely can't retry + return Poll::Ready(Err(reason)); + } + + trace!( + "unstarted request canceled, trying again (reason={:?})", + reason + ); + *req.uri_mut() = uri.clone(); + send_fut = client.send_request(req, pool_key.clone()); + } + } + }) + } + + fn send_request( + &self, + mut req: Request, + pool_key: PoolKey, + ) -> impl Future, ClientError>> + Unpin { + let conn = self.connection_for(pool_key); + + let set_host = self.config.set_host; + let executor = self.conn_builder.exec.clone(); + conn.and_then(move |mut pooled| { + if pooled.is_http1() { + if set_host { + let uri = req.uri().clone(); + req.headers_mut().entry(HOST).or_insert_with(|| { + let hostname = uri.host().expect("authority implies host"); + if let Some(port) = uri.port() { + let s = format!("{}:{}", hostname, port); + HeaderValue::from_str(&s) + } else { + HeaderValue::from_str(hostname) + } + .expect("uri host is valid header value") + }); + } + + // 
CONNECT always sends authority-form, so check it first... + if req.method() == Method::CONNECT { + authority_form(req.uri_mut()); + } else if pooled.conn_info.is_proxied { + absolute_form(req.uri_mut()); + } else { + origin_form(req.uri_mut()); + }; + } else if req.method() == Method::CONNECT { + debug!("client does not support CONNECT requests over HTTP2"); + return Either::Left(future::err(ClientError::Normal( + crate::Error::new_user_unsupported_request_method(), + ))); + } + + let fut = pooled + .send_request_retryable(req) + .map_err(ClientError::map_with_reused(pooled.is_reused())); + + // If the Connector included 'extra' info, add to Response... + let extra_info = pooled.conn_info.extra.clone(); + let fut = fut.map_ok(move |mut res| { + if let Some(extra) = extra_info { + extra.set(res.extensions_mut()); + } + res + }); + + // As of futures@0.1.21, there is a race condition in the mpsc + // channel, such that sending when the receiver is closing can + // result in the message being stuck inside the queue. It won't + // ever notify until the Sender side is dropped. + // + // To counteract this, we must check if our senders 'want' channel + // has been closed after having tried to send. If so, error out... + if pooled.is_closed() { + return Either::Right(Either::Left(fut)); + } + + Either::Right(Either::Right(fut.map_ok(move |mut res| { + // If pooled is HTTP/2, we can toss this reference immediately. + // + // when pooled is dropped, it will try to insert back into the + // pool. To delay that, spawn a future that completes once the + // sender is ready again. + // + // This *should* only be once the related `Connection` has polled + // for a new request to start. + // + // It won't be ready if there is a body to stream. + if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { + drop(pooled); + } else if !res.body().is_end_stream() { + let (delayed_tx, delayed_rx) = oneshot::channel(); + res.body_mut().delayed_eof(delayed_rx); + let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { + // At this point, `pooled` is dropped, and had a chance + // to insert into the pool (if conn was idle) + drop(delayed_tx); + }); + + executor.execute(on_idle); + } else { + // There's no body to delay, but the connection isn't + // ready yet. Only re-insert when it's ready + let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); + + executor.execute(on_idle); + } + res + }))) + }) + } + + fn connection_for( + &self, + pool_key: PoolKey, + ) -> impl Future>, ClientError>> { + // This actually races 2 different futures to try to get a ready + // connection the fastest, and to reduce connection churn. + // + // - If the pool has an idle connection waiting, that's used + // immediately. + // - Otherwise, the Connector is asked to start connecting to + // the destination Uri. + // - Meanwhile, the pool Checkout is watching to see if any other + // request finishes and tries to insert an idle connection. + // - If a new connection is started, but the Checkout wins after + // (an idle connection became available first), the started + // connection future is spawned into the runtime to complete, + // and then be inserted into the pool as an idle connection. + let checkout = self.pool.checkout(pool_key.clone()); + let connect = self.connect_to(pool_key); + + let executor = self.conn_builder.exec.clone(); + // The order of the `select` is depended on below... 
+ future::select(checkout, connect).then(move |either| match either { + // Checkout won, connect future may have been started or not. + // + // If it has, let it finish and insert back into the pool, + // so as to not waste the socket... + Either::Left((Ok(checked_out), connecting)) => { + // This depends on the `select` above having the correct + // order, such that if the checkout future were ready + // immediately, the connect future will never have been + // started. + // + // If it *wasn't* ready yet, then the connect future will + // have been started... + if connecting.started() { + let bg = connecting + .map_err(|err| { + trace!("background connect error: {}", err); + }) + .map(|_pooled| { + // dropping here should just place it in + // the Pool for us... + }); + // An execute error here isn't important, we're just trying + // to prevent a waste of a socket... + executor.execute(bg); + } + Either::Left(future::ok(checked_out)) + } + // Connect won, checkout can just be dropped. + Either::Right((Ok(connected), _checkout)) => Either::Left(future::ok(connected)), + // Either checkout or connect could get canceled: + // + // 1. Connect is canceled if this is HTTP/2 and there is + // an outstanding HTTP/2 connecting task. + // 2. Checkout is canceled if the pool cannot deliver an + // idle connection reliably. + // + // In both cases, we should just wait for the other future. + Either::Left((Err(err), connecting)) => Either::Right(Either::Left({ + if err.is_canceled() { + Either::Left(connecting.map_err(ClientError::Normal)) + } else { + Either::Right(future::err(ClientError::Normal(err))) + } + })), + Either::Right((Err(err), checkout)) => Either::Right(Either::Right({ + if err.is_canceled() { + Either::Left(checkout.map_err(ClientError::Normal)) + } else { + Either::Right(future::err(ClientError::Normal(err))) + } + })), + }) + } + + fn connect_to( + &self, + pool_key: PoolKey, + ) -> impl Lazy>>> + Unpin { + let executor = self.conn_builder.exec.clone(); + let pool = self.pool.clone(); + #[cfg(not(feature = "http2"))] + let conn_builder = self.conn_builder.clone(); + #[cfg(feature = "http2")] + let mut conn_builder = self.conn_builder.clone(); + let ver = self.config.ver; + let is_ver_h2 = ver == Ver::Http2; + let connector = self.connector.clone(); + let dst = domain_as_uri(pool_key.clone()); + hyper_lazy(move || { + // Try to take a "connecting lock". + // + // If the pool_key is for HTTP/2, and there is already a + // connection being established, then this can't take a + // second lock. The "connect_to" future is Canceled. + let connecting = match pool.connecting(&pool_key, ver) { + Some(lock) => lock, + None => { + let canceled = + crate::Error::new_canceled().with("HTTP/2 connection in progress"); + return Either::Right(future::err(canceled)); + } + }; + Either::Left( + connector + .connect(connect::sealed::Internal, dst) + .map_err(crate::Error::new_connect) + .and_then(move |io| { + let connected = io.connected(); + // If ALPN is h2 and we aren't http2_only already, + // then we need to convert our pool checkout into + // a single HTTP2 one. + let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { + match connecting.alpn_h2(&pool) { + Some(lock) => { + trace!("ALPN negotiated h2, updating pool"); + lock + } + None => { + // Another connection has already upgraded, + // the pool checkout should finish up for us. 
+ let canceled = crate::Error::new_canceled() + .with("ALPN upgraded to HTTP/2"); + return Either::Right(future::err(canceled)); + } + } + } else { + connecting + }; + + #[cfg_attr(not(feature = "http2"), allow(unused))] + let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; + #[cfg(feature = "http2")] + { + conn_builder.http2_only(is_h2); + } + + Either::Left(Box::pin( + conn_builder + .handshake(io) + .and_then(move |(tx, conn)| { + trace!( + "handshake complete, spawning background dispatcher task" + ); + executor.execute( + conn.map_err(|e| debug!("client connection error: {}", e)) + .map(|_| ()), + ); + + // Wait for 'conn' to ready up before we + // declare this tx as usable + tx.when_ready() + }) + .map_ok(move |tx| { + let tx = { + #[cfg(feature = "http2")] + { + if is_h2 { + PoolTx::Http2(tx.into_http2()) + } else { + PoolTx::Http1(tx) + } + } + #[cfg(not(feature = "http2"))] + PoolTx::Http1(tx) + }; + pool.pooled( + connecting, + PoolClient { + conn_info: connected, + tx, + }, + ) + }), + )) + }), + ) + }) + } +} + +impl tower_service::Service> for Client +where + C: Connect + Clone + Send + Sync + 'static, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Response = Response; + type Error = crate::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.request(req) + } +} + +impl tower_service::Service> for &'_ Client +where + C: Connect + Clone + Send + Sync + 'static, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Response = Response; + type Error = crate::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.request(req) + } +} + +impl Clone for Client { + fn clone(&self) -> Client { + Client { + config: self.config.clone(), + conn_builder: self.conn_builder.clone(), + connector: self.connector.clone(), + pool: self.pool.clone(), + } + } +} + +impl fmt::Debug for Client { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Client").finish() + } +} + +// ===== impl ResponseFuture ===== + +impl ResponseFuture { + fn new(fut: Box>> + Send>) -> Self { + Self { inner: fut.into() } + } + + fn error_version(ver: Version) -> Self { + warn!("Request has unsupported version \"{:?}\"", ver); + ResponseFuture::new(Box::new(future::err( + crate::Error::new_user_unsupported_version(), + ))) + } +} + +impl fmt::Debug for ResponseFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Future") + } +} + +impl Future for ResponseFuture { + type Output = crate::Result>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + Pin::new(&mut self.inner).poll(cx) + } +} + +// ===== impl PoolClient ===== + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +struct PoolClient { + conn_info: Connected, + tx: PoolTx, +} + +enum PoolTx { + Http1(conn::SendRequest), + #[cfg(feature = "http2")] + Http2(conn::Http2SendRequest), +} + +impl PoolClient { + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + match self.tx { + PoolTx::Http1(ref mut tx) => tx.poll_ready(cx), + #[cfg(feature = "http2")] + PoolTx::Http2(_) => Poll::Ready(Ok(())), + } + } + + fn is_http1(&self) -> bool { + !self.is_http2() + } + + fn is_http2(&self) -> 
bool { + match self.tx { + PoolTx::Http1(_) => false, + #[cfg(feature = "http2")] + PoolTx::Http2(_) => true, + } + } + + fn is_ready(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_ready(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_ready(), + } + } + + fn is_closed(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_closed(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_closed(), + } + } +} + +impl PoolClient { + fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (crate::Error, Option>)>> + where + B: Send, + { + match self.tx { + #[cfg(not(feature = "http2"))] + PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req), + #[cfg(feature = "http2")] + PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)), + #[cfg(feature = "http2")] + PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)), + } + } +} + +impl Poolable for PoolClient +where + B: Send + 'static, +{ + fn is_open(&self) -> bool { + match self.tx { + PoolTx::Http1(ref tx) => tx.is_ready(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_ready(), + } + } + + fn reserve(self) -> Reservation { + match self.tx { + PoolTx::Http1(tx) => Reservation::Unique(PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http1(tx), + }), + #[cfg(feature = "http2")] + PoolTx::Http2(tx) => { + let b = PoolClient { + conn_info: self.conn_info.clone(), + tx: PoolTx::Http2(tx.clone()), + }; + let a = PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http2(tx), + }; + Reservation::Shared(a, b) + } + } + } + + fn can_share(&self) -> bool { + self.is_http2() + } +} + +// ===== impl ClientError ===== + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +enum ClientError { + Normal(crate::Error), + Canceled { + connection_reused: bool, + req: Request, + reason: crate::Error, + }, +} + +impl ClientError { + fn map_with_reused(conn_reused: bool) -> impl Fn((crate::Error, Option>)) -> Self { + move |(err, orig_req)| { + if let Some(req) = orig_req { + ClientError::Canceled { + connection_reused: conn_reused, + reason: err, + req, + } + } else { + ClientError::Normal(err) + } + } + } +} + +/// A marker to identify what version a pooled connection is. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub(super) enum Ver { + Auto, + Http2, +} + +fn origin_form(uri: &mut Uri) { + let path = match uri.path_and_query() { + Some(path) if path.as_str() != "/" => { + let mut parts = ::http::uri::Parts::default(); + parts.path_and_query = Some(path.clone()); + Uri::from_parts(parts).expect("path is valid uri") + } + _none_or_just_slash => { + debug_assert!(Uri::default() == "/"); + Uri::default() + } + }; + *uri = path +} + +fn absolute_form(uri: &mut Uri) { + debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); + debug_assert!( + uri.authority().is_some(), + "absolute_form needs an authority" + ); + // If the URI is to HTTPS, and the connector claimed to be a proxy, + // then it *should* have tunneled, and so we don't want to send + // absolute-form in that case. + if uri.scheme() == Some(&Scheme::HTTPS) { + origin_form(uri); + } +} + +fn authority_form(uri: &mut Uri) { + if let Some(path) = uri.path_and_query() { + // `https://hyper.rs` would parse with `/` path, don't + // annoy people about that... 
+ if path != "/" { + warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); + } + } + *uri = match uri.authority() { + Some(auth) => { + let mut parts = ::http::uri::Parts::default(); + parts.authority = Some(auth.clone()); + Uri::from_parts(parts).expect("authority is valid") + } + None => { + unreachable!("authority_form with relative uri"); + } + }; +} + +fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result { + let uri_clone = uri.clone(); + match (uri_clone.scheme(), uri_clone.authority()) { + (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), + (None, Some(auth)) if is_http_connect => { + let scheme = match auth.port_u16() { + Some(443) => { + set_scheme(uri, Scheme::HTTPS); + Scheme::HTTPS + } + _ => { + set_scheme(uri, Scheme::HTTP); + Scheme::HTTP + } + }; + Ok((scheme, auth.clone())) + } + _ => { + debug!("Client requires absolute-form URIs, received: {:?}", uri); + Err(crate::Error::new_user_absolute_uri_required()) + } + } +} + +fn domain_as_uri((scheme, auth): PoolKey) -> Uri { + http::uri::Builder::new() + .scheme(scheme) + .authority(auth) + .path_and_query("/") + .build() + .expect("domain is valid Uri") +} + +fn set_scheme(uri: &mut Uri, scheme: Scheme) { + debug_assert!( + uri.scheme().is_none(), + "set_scheme expects no existing scheme" + ); + let old = mem::replace(uri, Uri::default()); + let mut parts: ::http::uri::Parts = old.into(); + parts.scheme = Some(scheme); + parts.path_and_query = Some("/".parse().expect("slash is a valid path")); + *uri = Uri::from_parts(parts).expect("scheme is valid"); +} + +/// A builder to configure a new [`Client`](Client). +/// +/// # Example +/// +/// ``` +/// # #[cfg(feature = "runtime")] +/// # fn run () { +/// use std::time::Duration; +/// use hyper::Client; +/// +/// let client = Client::builder() +/// .pool_idle_timeout(Duration::from_secs(30)) +/// .http2_only(true) +/// .build_http(); +/// # let infer: Client<_, hyper::Body> = client; +/// # drop(infer); +/// # } +/// # fn main() {} +/// ``` +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +#[derive(Clone)] +pub struct Builder { + client_config: Config, + conn_builder: conn::Builder, + pool_config: pool::Config, +} + +impl Default for Builder { + fn default() -> Self { + Self { + client_config: Config { + retry_canceled_requests: true, + set_host: true, + ver: Ver::Auto, + }, + conn_builder: conn::Builder::new(), + pool_config: pool::Config { + idle_timeout: Some(Duration::from_secs(90)), + max_idle_per_host: std::usize::MAX, + }, + } + } +} + +impl Builder { + #[doc(hidden)] + #[deprecated( + note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)" + )] + pub fn keep_alive(&mut self, val: bool) -> &mut Self { + if !val { + // disable + self.pool_max_idle_per_host(0) + } else if self.pool_config.max_idle_per_host == 0 { + // enable + self.pool_max_idle_per_host(std::usize::MAX) + } else { + // already enabled + self + } + } + + #[doc(hidden)] + #[deprecated(note = "renamed to `pool_idle_timeout`")] + pub fn keep_alive_timeout(&mut self, val: D) -> &mut Self + where + D: Into>, + { + self.pool_idle_timeout(val) + } + + /// Set an optional timeout for idle sockets being kept-alive. + /// + /// Pass `None` to disable timeout. + /// + /// Default is 90 seconds. 
+ pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self + where + D: Into>, + { + self.pool_config.idle_timeout = val.into(); + self + } + + #[doc(hidden)] + #[deprecated(note = "renamed to `pool_max_idle_per_host`")] + pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { + self.pool_config.max_idle_per_host = max_idle; + self + } + + /// Sets the maximum idle connection per host allowed in the pool. + /// + /// Default is `usize::MAX` (no limit). + pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { + self.pool_config.max_idle_per_host = max_idle; + self + } + + // HTTP/1 options + + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `http1_max_buf_size` option. + /// + /// Default is an adaptive read buffer. + pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { + self.conn_builder.h1_read_buf_exact_size(Some(sz)); + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `http1_read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { + self.conn_builder.h1_max_buf_size(max); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { + self.conn_builder.h1_title_case_headers(val); + self + } + + /// Set whether the connection **must** use HTTP/2. + /// + /// The destination must either allow HTTP2 Prior Knowledge, or the + /// `Connect` should be configured to do use ALPN to upgrade to `h2` + /// as part of the connection process. This will not make the `Client` + /// utilize ALPN by itself. + /// + /// Note that setting this to true prevents HTTP/1 from being allowed. + /// + /// Default is false. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_only(&mut self, val: bool) -> &mut Self { + self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + self.conn_builder + .http2_initial_stream_window_size(sz.into()); + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_connection_window_size( + &mut self, + sz: impl Into>, + ) -> &mut Self { + self.conn_builder + .http2_initial_connection_window_size(sz.into()); + self + } + + /// Sets whether to use an adaptive flow control. 
+ /// + /// Enabling this will override the limits set in + /// `http2_initial_stream_window_size` and + /// `http2_initial_connection_window_size`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { + self.conn_builder.http2_adaptive_window(enabled); + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + self.conn_builder.http2_max_frame_size(sz); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_interval( + &mut self, + interval: impl Into>, + ) -> &mut Self { + self.conn_builder.http2_keep_alive_interval(interval); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.conn_builder.http2_keep_alive_timeout(timeout); + self + } + + /// Sets whether HTTP2 keep-alive should apply while the connection is idle. + /// + /// If disabled, keep-alive pings are only sent while there are open + /// request/responses streams. If enabled, pings are also sent when no + /// streams are active. Does nothing if `http2_keep_alive_interval` is + /// disabled. + /// + /// Default is `false`. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { + self.conn_builder.http2_keep_alive_while_idle(enabled); + self + } + + /// Set whether to retry requests that get disrupted before ever starting + /// to write. + /// + /// This means a request that is queued, and gets given an idle, reused + /// connection, and then encounters an error immediately as the idle + /// connection was found to be unusable. + /// + /// When this is set to `false`, the related `ResponseFuture` would instead + /// resolve to an `Error::Cancel`. + /// + /// Default is `true`. + #[inline] + pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { + self.client_config.retry_canceled_requests = val; + self + } + + /// Set whether to automatically add the `Host` header to requests. + /// + /// If true, and a request does not include a `Host` header, one will be + /// added automatically, derived from the authority of the `Uri`. + /// + /// Default is `true`. 
+ #[inline] + pub fn set_host(&mut self, val: bool) -> &mut Self { + self.client_config.set_host = val; + self + } + + /// Provide an executor to execute background `Connection` tasks. + pub fn executor(&mut self, exec: E) -> &mut Self + where + E: Executor + Send + Sync + 'static, + { + self.conn_builder.executor(exec); + self + } + + /// Builder a client with this configuration and the default `HttpConnector`. + #[cfg(feature = "tcp")] + pub fn build_http(&self) -> Client + where + B: HttpBody + Send, + B::Data: Send, + { + let mut connector = HttpConnector::new(); + if self.pool_config.is_enabled() { + connector.set_keepalive(self.pool_config.idle_timeout); + } + self.build(connector) + } + + /// Combine the configuration of this builder with a connector to create a `Client`. + pub fn build(&self, connector: C) -> Client + where + C: Connect + Clone, + B: HttpBody + Send, + B::Data: Send, + { + Client { + config: self.client_config, + conn_builder: self.conn_builder.clone(), + connector, + pool: Pool::new(self.pool_config, &self.conn_builder.exec), + } + } +} + +impl fmt::Debug for Builder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Builder") + .field("client_config", &self.client_config) + .field("conn_builder", &self.conn_builder) + .field("pool_config", &self.pool_config) + .finish() + } +} + +#[cfg(test)] +mod unit_tests { + use super::*; + + #[test] + fn set_relative_uri_with_implicit_path() { + let mut uri = "http://hyper.rs".parse().unwrap(); + origin_form(&mut uri); + assert_eq!(uri.to_string(), "/"); + } + + #[test] + fn test_origin_form() { + let mut uri = "http://hyper.rs/guides".parse().unwrap(); + origin_form(&mut uri); + assert_eq!(uri.to_string(), "/guides"); + + let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap(); + origin_form(&mut uri); + assert_eq!(uri.to_string(), "/guides?foo=bar"); + } + + #[test] + fn test_absolute_form() { + let mut uri = "http://hyper.rs/guides".parse().unwrap(); + absolute_form(&mut uri); + assert_eq!(uri.to_string(), "http://hyper.rs/guides"); + + let mut uri = "https://hyper.rs/guides".parse().unwrap(); + absolute_form(&mut uri); + assert_eq!(uri.to_string(), "/guides"); + } + + #[test] + fn test_authority_form() { + let _ = pretty_env_logger::try_init(); + + let mut uri = "http://hyper.rs".parse().unwrap(); + authority_form(&mut uri); + assert_eq!(uri.to_string(), "hyper.rs"); + + let mut uri = "hyper.rs".parse().unwrap(); + authority_form(&mut uri); + assert_eq!(uri.to_string(), "hyper.rs"); + } + + #[test] + fn test_extract_domain_connect_no_port() { + let mut uri = "hyper.rs".parse().unwrap(); + let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain"); + assert_eq!(scheme, *"http"); + assert_eq!(host, "hyper.rs"); + } +} diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs index 3a9c7f708d..eac980fa3d 100644 --- a/src/client/connect/http.rs +++ b/src/client/connect/http.rs @@ -27,6 +27,7 @@ use super::{Connected, Connection}; /// /// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes /// transport information such as the remote socket address used. 
+#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))] #[derive(Clone)] pub struct HttpConnector { config: Arc, diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs index 52c52ee9bc..862a0e65c1 100644 --- a/src/client/connect/mod.rs +++ b/src/client/connect/mod.rs @@ -83,13 +83,20 @@ use std::fmt; use ::http::Extensions; -#[cfg(feature = "tcp")] -pub mod dns; -#[cfg(feature = "tcp")] -mod http; -#[cfg(feature = "tcp")] -pub use self::http::{HttpConnector, HttpInfo}; -pub use self::sealed::Connect; +cfg_feature! { + #![feature = "tcp"] + + pub use self::http::{HttpConnector, HttpInfo}; + + pub mod dns; + mod http; +} + +cfg_feature! { + #![any(feature = "http1", feature = "http2")] + + pub use self::sealed::Connect; +} /// Describes a type returned by a connector. pub trait Connection { @@ -260,6 +267,7 @@ where } } +#[cfg(any(feature = "http1", feature = "http2"))] pub(super) mod sealed { use std::error::Error as StdError; diff --git a/src/client/mod.rs b/src/client/mod.rs index 32fb4a6b51..7f3006aa3b 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -48,1243 +48,22 @@ //! # fn main () {} //! ``` -use std::error::Error as StdError; -use std::fmt; -use std::mem; -use std::time::Duration; - -use futures_channel::oneshot; -use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; -use http::header::{HeaderValue, HOST}; -use http::uri::Scheme; -use http::{Method, Request, Response, Uri, Version}; - -use self::connect::{sealed::Connect, Alpn, Connected, Connection}; -use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation}; -use crate::body::{Body, HttpBody}; -use crate::common::{exec::BoxSendFuture, lazy as hyper_lazy, task, Future, Lazy, Pin, Poll}; -use crate::rt::Executor; - #[cfg(feature = "tcp")] pub use self::connect::HttpConnector; -pub mod conn; pub mod connect; -pub(crate) mod dispatch; -mod pool; -pub mod service; #[cfg(test)] #[cfg(feature = "runtime")] mod tests; -/// A Client to make outgoing HTTP requests. -pub struct Client { - config: Config, - conn_builder: conn::Builder, - connector: C, - pool: Pool>, -} - -#[derive(Clone, Copy, Debug)] -struct Config { - retry_canceled_requests: bool, - set_host: bool, - ver: Ver, -} - -/// A `Future` that will resolve to an HTTP Response. -/// -/// This is returned by `Client::request` (and `Client::get`). -#[must_use = "futures do nothing unless polled"] -pub struct ResponseFuture { - inner: Pin>> + Send>>, -} - -// ===== impl Client ===== - -#[cfg(feature = "tcp")] -impl Client { - /// Create a new Client with the default [config](Builder). - /// - /// # Note - /// - /// The default connector does **not** handle TLS. Speaking to `https` - /// destinations will require [configuring a connector that implements - /// TLS](https://hyper.rs/guides/client/configuration). - #[inline] - pub fn new() -> Client { - Builder::default().build_http() - } -} - -#[cfg(feature = "tcp")] -impl Default for Client { - fn default() -> Client { - Client::new() - } -} - -impl Client<(), Body> { - /// Create a builder to configure a new `Client`. 
- /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use std::time::Duration; - /// use hyper::Client; - /// - /// let client = Client::builder() - /// .pool_idle_timeout(Duration::from_secs(30)) - /// .http2_only(true) - /// .build_http(); - /// # let infer: Client<_, hyper::Body> = client; - /// # drop(infer); - /// # } - /// # fn main() {} - /// ``` - #[inline] - pub fn builder() -> Builder { - Builder::default() - } -} - -impl Client -where - C: Connect + Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - /// Send a `GET` request to the supplied `Uri`. - /// - /// # Note - /// - /// This requires that the `HttpBody` type have a `Default` implementation. - /// It *should* return an "empty" version of itself, such that - /// `HttpBody::is_end_stream` is `true`. - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use hyper::{Client, Uri}; - /// - /// let client = Client::new(); - /// - /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); - /// # } - /// # fn main() {} - /// ``` - pub fn get(&self, uri: Uri) -> ResponseFuture - where - B: Default, - { - let body = B::default(); - if !body.is_end_stream() { - warn!("default HttpBody used for get() does not return true for is_end_stream"); - } - - let mut req = Request::new(body); - *req.uri_mut() = uri; - self.request(req) - } - - /// Send a constructed `Request` using this `Client`. - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use hyper::{Body, Client, Request}; - /// - /// let client = Client::new(); - /// - /// let req = Request::builder() - /// .method("POST") - /// .uri("http://httpin.org/post") - /// .body(Body::from("Hallo!")) - /// .expect("request builder"); - /// - /// let future = client.request(req); - /// # } - /// # fn main() {} - /// ``` - pub fn request(&self, mut req: Request) -> ResponseFuture { - let is_http_connect = req.method() == Method::CONNECT; - match req.version() { - Version::HTTP_11 => (), - Version::HTTP_10 => { - if is_http_connect { - warn!("CONNECT is not allowed for HTTP/1.0"); - return ResponseFuture::new(Box::new(future::err( - crate::Error::new_user_unsupported_request_method(), - ))); - } - } - other_h2 @ Version::HTTP_2 => { - if self.config.ver != Ver::Http2 { - return ResponseFuture::error_version(other_h2); - } - } - // completely unsupported HTTP version (like HTTP/0.9)! 
- other => return ResponseFuture::error_version(other), - }; - - let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { - Ok(s) => s, - Err(err) => { - return ResponseFuture::new(Box::new(future::err(err))); - } - }; - - ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key))) - } - - fn retryably_send_request( - &self, - req: Request, - pool_key: PoolKey, - ) -> impl Future>> { - let client = self.clone(); - let uri = req.uri().clone(); - - let mut send_fut = client.send_request(req, pool_key.clone()); - future::poll_fn(move |cx| loop { - match ready!(Pin::new(&mut send_fut).poll(cx)) { - Ok(resp) => return Poll::Ready(Ok(resp)), - Err(ClientError::Normal(err)) => return Poll::Ready(Err(err)), - Err(ClientError::Canceled { - connection_reused, - mut req, - reason, - }) => { - if !client.config.retry_canceled_requests || !connection_reused { - // if client disabled, don't retry - // a fresh connection means we definitely can't retry - return Poll::Ready(Err(reason)); - } - - trace!( - "unstarted request canceled, trying again (reason={:?})", - reason - ); - *req.uri_mut() = uri.clone(); - send_fut = client.send_request(req, pool_key.clone()); - } - } - }) - } - - fn send_request( - &self, - mut req: Request, - pool_key: PoolKey, - ) -> impl Future, ClientError>> + Unpin { - let conn = self.connection_for(pool_key); - - let set_host = self.config.set_host; - let executor = self.conn_builder.exec.clone(); - conn.and_then(move |mut pooled| { - if pooled.is_http1() { - if set_host { - let uri = req.uri().clone(); - req.headers_mut().entry(HOST).or_insert_with(|| { - let hostname = uri.host().expect("authority implies host"); - if let Some(port) = uri.port() { - let s = format!("{}:{}", hostname, port); - HeaderValue::from_str(&s) - } else { - HeaderValue::from_str(hostname) - } - .expect("uri host is valid header value") - }); - } - - // CONNECT always sends authority-form, so check it first... - if req.method() == Method::CONNECT { - authority_form(req.uri_mut()); - } else if pooled.conn_info.is_proxied { - absolute_form(req.uri_mut()); - } else { - origin_form(req.uri_mut()); - }; - } else if req.method() == Method::CONNECT { - debug!("client does not support CONNECT requests over HTTP2"); - return Either::Left(future::err(ClientError::Normal( - crate::Error::new_user_unsupported_request_method(), - ))); - } - - let fut = pooled - .send_request_retryable(req) - .map_err(ClientError::map_with_reused(pooled.is_reused())); - - // If the Connector included 'extra' info, add to Response... - let extra_info = pooled.conn_info.extra.clone(); - let fut = fut.map_ok(move |mut res| { - if let Some(extra) = extra_info { - extra.set(res.extensions_mut()); - } - res - }); - - // As of futures@0.1.21, there is a race condition in the mpsc - // channel, such that sending when the receiver is closing can - // result in the message being stuck inside the queue. It won't - // ever notify until the Sender side is dropped. - // - // To counteract this, we must check if our senders 'want' channel - // has been closed after having tried to send. If so, error out... - if pooled.is_closed() { - return Either::Right(Either::Left(fut)); - } - - Either::Right(Either::Right(fut.map_ok(move |mut res| { - // If pooled is HTTP/2, we can toss this reference immediately. - // - // when pooled is dropped, it will try to insert back into the - // pool. To delay that, spawn a future that completes once the - // sender is ready again. 
- // - // This *should* only be once the related `Connection` has polled - // for a new request to start. - // - // It won't be ready if there is a body to stream. - if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { - drop(pooled); - } else if !res.body().is_end_stream() { - let (delayed_tx, delayed_rx) = oneshot::channel(); - res.body_mut().delayed_eof(delayed_rx); - let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { - // At this point, `pooled` is dropped, and had a chance - // to insert into the pool (if conn was idle) - drop(delayed_tx); - }); - - executor.execute(on_idle); - } else { - // There's no body to delay, but the connection isn't - // ready yet. Only re-insert when it's ready - let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); - - executor.execute(on_idle); - } - res - }))) - }) - } - - fn connection_for( - &self, - pool_key: PoolKey, - ) -> impl Future>, ClientError>> { - // This actually races 2 different futures to try to get a ready - // connection the fastest, and to reduce connection churn. - // - // - If the pool has an idle connection waiting, that's used - // immediately. - // - Otherwise, the Connector is asked to start connecting to - // the destination Uri. - // - Meanwhile, the pool Checkout is watching to see if any other - // request finishes and tries to insert an idle connection. - // - If a new connection is started, but the Checkout wins after - // (an idle connection became available first), the started - // connection future is spawned into the runtime to complete, - // and then be inserted into the pool as an idle connection. - let checkout = self.pool.checkout(pool_key.clone()); - let connect = self.connect_to(pool_key); - - let executor = self.conn_builder.exec.clone(); - // The order of the `select` is depended on below... - future::select(checkout, connect).then(move |either| match either { - // Checkout won, connect future may have been started or not. - // - // If it has, let it finish and insert back into the pool, - // so as to not waste the socket... - Either::Left((Ok(checked_out), connecting)) => { - // This depends on the `select` above having the correct - // order, such that if the checkout future were ready - // immediately, the connect future will never have been - // started. - // - // If it *wasn't* ready yet, then the connect future will - // have been started... - if connecting.started() { - let bg = connecting - .map_err(|err| { - trace!("background connect error: {}", err); - }) - .map(|_pooled| { - // dropping here should just place it in - // the Pool for us... - }); - // An execute error here isn't important, we're just trying - // to prevent a waste of a socket... - executor.execute(bg); - } - Either::Left(future::ok(checked_out)) - } - // Connect won, checkout can just be dropped. - Either::Right((Ok(connected), _checkout)) => Either::Left(future::ok(connected)), - // Either checkout or connect could get canceled: - // - // 1. Connect is canceled if this is HTTP/2 and there is - // an outstanding HTTP/2 connecting task. - // 2. Checkout is canceled if the pool cannot deliver an - // idle connection reliably. - // - // In both cases, we should just wait for the other future. 
- Either::Left((Err(err), connecting)) => Either::Right(Either::Left({ - if err.is_canceled() { - Either::Left(connecting.map_err(ClientError::Normal)) - } else { - Either::Right(future::err(ClientError::Normal(err))) - } - })), - Either::Right((Err(err), checkout)) => Either::Right(Either::Right({ - if err.is_canceled() { - Either::Left(checkout.map_err(ClientError::Normal)) - } else { - Either::Right(future::err(ClientError::Normal(err))) - } - })), - }) - } - - fn connect_to( - &self, - pool_key: PoolKey, - ) -> impl Lazy>>> + Unpin { - let executor = self.conn_builder.exec.clone(); - let pool = self.pool.clone(); - #[cfg(not(feature = "http2"))] - let conn_builder = self.conn_builder.clone(); - #[cfg(feature = "http2")] - let mut conn_builder = self.conn_builder.clone(); - let ver = self.config.ver; - let is_ver_h2 = ver == Ver::Http2; - let connector = self.connector.clone(); - let dst = domain_as_uri(pool_key.clone()); - hyper_lazy(move || { - // Try to take a "connecting lock". - // - // If the pool_key is for HTTP/2, and there is already a - // connection being established, then this can't take a - // second lock. The "connect_to" future is Canceled. - let connecting = match pool.connecting(&pool_key, ver) { - Some(lock) => lock, - None => { - let canceled = - crate::Error::new_canceled().with("HTTP/2 connection in progress"); - return Either::Right(future::err(canceled)); - } - }; - Either::Left( - connector - .connect(connect::sealed::Internal, dst) - .map_err(crate::Error::new_connect) - .and_then(move |io| { - let connected = io.connected(); - // If ALPN is h2 and we aren't http2_only already, - // then we need to convert our pool checkout into - // a single HTTP2 one. - let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { - match connecting.alpn_h2(&pool) { - Some(lock) => { - trace!("ALPN negotiated h2, updating pool"); - lock - } - None => { - // Another connection has already upgraded, - // the pool checkout should finish up for us. 
- let canceled = crate::Error::new_canceled() - .with("ALPN upgraded to HTTP/2"); - return Either::Right(future::err(canceled)); - } - } - } else { - connecting - }; - - #[cfg_attr(not(feature = "http2"), allow(unused))] - let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; - #[cfg(feature = "http2")] - { - conn_builder.http2_only(is_h2); - } - - Either::Left(Box::pin( - conn_builder - .handshake(io) - .and_then(move |(tx, conn)| { - trace!( - "handshake complete, spawning background dispatcher task" - ); - executor.execute( - conn.map_err(|e| debug!("client connection error: {}", e)) - .map(|_| ()), - ); - - // Wait for 'conn' to ready up before we - // declare this tx as usable - tx.when_ready() - }) - .map_ok(move |tx| { - let tx = { - #[cfg(feature = "http2")] - { - if is_h2 { - PoolTx::Http2(tx.into_http2()) - } else { - PoolTx::Http1(tx) - } - } - #[cfg(not(feature = "http2"))] - PoolTx::Http1(tx) - }; - pool.pooled( - connecting, - PoolClient { - conn_info: connected, - tx, - }, - ) - }), - )) - }), - ) - }) - } -} - -impl tower_service::Service> for Client -where - C: Connect + Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Response = Response; - type Error = crate::Error; - type Future = ResponseFuture; - - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: Request) -> Self::Future { - self.request(req) - } -} - -impl tower_service::Service> for &'_ Client -where - C: Connect + Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Response = Response; - type Error = crate::Error; - type Future = ResponseFuture; - - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: Request) -> Self::Future { - self.request(req) - } -} - -impl Clone for Client { - fn clone(&self) -> Client { - Client { - config: self.config.clone(), - conn_builder: self.conn_builder.clone(), - connector: self.connector.clone(), - pool: self.pool.clone(), - } - } -} - -impl fmt::Debug for Client { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Client").finish() - } -} - -// ===== impl ResponseFuture ===== - -impl ResponseFuture { - fn new(fut: Box>> + Send>) -> Self { - Self { inner: fut.into() } - } - - fn error_version(ver: Version) -> Self { - warn!("Request has unsupported version \"{:?}\"", ver); - ResponseFuture::new(Box::new(future::err( - crate::Error::new_user_unsupported_version(), - ))) - } -} - -impl fmt::Debug for ResponseFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Future") - } -} - -impl Future for ResponseFuture { - type Output = crate::Result>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx) - } -} - -// ===== impl PoolClient ===== - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -struct PoolClient { - conn_info: Connected, - tx: PoolTx, -} - -enum PoolTx { - Http1(conn::SendRequest), - #[cfg(feature = "http2")] - Http2(conn::Http2SendRequest), -} - -impl PoolClient { - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - match self.tx { - PoolTx::Http1(ref mut tx) => tx.poll_ready(cx), - #[cfg(feature = "http2")] - PoolTx::Http2(_) => Poll::Ready(Ok(())), - } - } - - fn is_http1(&self) -> bool { - !self.is_http2() - } - - fn is_http2(&self) -> 
bool { - match self.tx { - PoolTx::Http1(_) => false, - #[cfg(feature = "http2")] - PoolTx::Http2(_) => true, - } - } - - fn is_ready(&self) -> bool { - match self.tx { - PoolTx::Http1(ref tx) => tx.is_ready(), - #[cfg(feature = "http2")] - PoolTx::Http2(ref tx) => tx.is_ready(), - } - } - - fn is_closed(&self) -> bool { - match self.tx { - PoolTx::Http1(ref tx) => tx.is_closed(), - #[cfg(feature = "http2")] - PoolTx::Http2(ref tx) => tx.is_closed(), - } - } -} - -impl PoolClient { - fn send_request_retryable( - &mut self, - req: Request, - ) -> impl Future, (crate::Error, Option>)>> - where - B: Send, - { - match self.tx { - #[cfg(not(feature = "http2"))] - PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req), - #[cfg(feature = "http2")] - PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)), - #[cfg(feature = "http2")] - PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)), - } - } -} - -impl Poolable for PoolClient -where - B: Send + 'static, -{ - fn is_open(&self) -> bool { - match self.tx { - PoolTx::Http1(ref tx) => tx.is_ready(), - #[cfg(feature = "http2")] - PoolTx::Http2(ref tx) => tx.is_ready(), - } - } - - fn reserve(self) -> Reservation { - match self.tx { - PoolTx::Http1(tx) => Reservation::Unique(PoolClient { - conn_info: self.conn_info, - tx: PoolTx::Http1(tx), - }), - #[cfg(feature = "http2")] - PoolTx::Http2(tx) => { - let b = PoolClient { - conn_info: self.conn_info.clone(), - tx: PoolTx::Http2(tx.clone()), - }; - let a = PoolClient { - conn_info: self.conn_info, - tx: PoolTx::Http2(tx), - }; - Reservation::Shared(a, b) - } - } - } - - fn can_share(&self) -> bool { - self.is_http2() - } -} - -// ===== impl ClientError ===== - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -enum ClientError { - Normal(crate::Error), - Canceled { - connection_reused: bool, - req: Request, - reason: crate::Error, - }, -} - -impl ClientError { - fn map_with_reused(conn_reused: bool) -> impl Fn((crate::Error, Option>)) -> Self { - move |(err, orig_req)| { - if let Some(req) = orig_req { - ClientError::Canceled { - connection_reused: conn_reused, - reason: err, - req, - } - } else { - ClientError::Normal(err) - } - } - } -} - -/// A marker to identify what version a pooled connection is. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -enum Ver { - Auto, - Http2, -} - -fn origin_form(uri: &mut Uri) { - let path = match uri.path_and_query() { - Some(path) if path.as_str() != "/" => { - let mut parts = ::http::uri::Parts::default(); - parts.path_and_query = Some(path.clone()); - Uri::from_parts(parts).expect("path is valid uri") - } - _none_or_just_slash => { - debug_assert!(Uri::default() == "/"); - Uri::default() - } - }; - *uri = path -} - -fn absolute_form(uri: &mut Uri) { - debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); - debug_assert!( - uri.authority().is_some(), - "absolute_form needs an authority" - ); - // If the URI is to HTTPS, and the connector claimed to be a proxy, - // then it *should* have tunneled, and so we don't want to send - // absolute-form in that case. - if uri.scheme() == Some(&Scheme::HTTPS) { - origin_form(uri); - } -} - -fn authority_form(uri: &mut Uri) { - if let Some(path) = uri.path_and_query() { - // `https://hyper.rs` would parse with `/` path, don't - // annoy people about that... 
- if path != "/" { - warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); - } - } - *uri = match uri.authority() { - Some(auth) => { - let mut parts = ::http::uri::Parts::default(); - parts.authority = Some(auth.clone()); - Uri::from_parts(parts).expect("authority is valid") - } - None => { - unreachable!("authority_form with relative uri"); - } - }; -} - -fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result { - let uri_clone = uri.clone(); - match (uri_clone.scheme(), uri_clone.authority()) { - (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), - (None, Some(auth)) if is_http_connect => { - let scheme = match auth.port_u16() { - Some(443) => { - set_scheme(uri, Scheme::HTTPS); - Scheme::HTTPS - } - _ => { - set_scheme(uri, Scheme::HTTP); - Scheme::HTTP - } - }; - Ok((scheme, auth.clone())) - } - _ => { - debug!("Client requires absolute-form URIs, received: {:?}", uri); - Err(crate::Error::new_user_absolute_uri_required()) - } - } -} - -fn domain_as_uri((scheme, auth): PoolKey) -> Uri { - http::uri::Builder::new() - .scheme(scheme) - .authority(auth) - .path_and_query("/") - .build() - .expect("domain is valid Uri") -} - -fn set_scheme(uri: &mut Uri, scheme: Scheme) { - debug_assert!( - uri.scheme().is_none(), - "set_scheme expects no existing scheme" - ); - let old = mem::replace(uri, Uri::default()); - let mut parts: ::http::uri::Parts = old.into(); - parts.scheme = Some(scheme); - parts.path_and_query = Some("/".parse().expect("slash is a valid path")); - *uri = Uri::from_parts(parts).expect("scheme is valid"); -} - -/// A builder to configure a new [`Client`](Client). -/// -/// # Example -/// -/// ``` -/// # #[cfg(feature = "runtime")] -/// # fn run () { -/// use std::time::Duration; -/// use hyper::Client; -/// -/// let client = Client::builder() -/// .pool_idle_timeout(Duration::from_secs(30)) -/// .http2_only(true) -/// .build_http(); -/// # let infer: Client<_, hyper::Body> = client; -/// # drop(infer); -/// # } -/// # fn main() {} -/// ``` -#[derive(Clone)] -pub struct Builder { - client_config: Config, - conn_builder: conn::Builder, - pool_config: pool::Config, -} - -impl Default for Builder { - fn default() -> Self { - Self { - client_config: Config { - retry_canceled_requests: true, - set_host: true, - ver: Ver::Auto, - }, - conn_builder: conn::Builder::new(), - pool_config: pool::Config { - idle_timeout: Some(Duration::from_secs(90)), - max_idle_per_host: std::usize::MAX, - }, - } - } -} - -impl Builder { - #[doc(hidden)] - #[deprecated( - note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)" - )] - pub fn keep_alive(&mut self, val: bool) -> &mut Self { - if !val { - // disable - self.pool_max_idle_per_host(0) - } else if self.pool_config.max_idle_per_host == 0 { - // enable - self.pool_max_idle_per_host(std::usize::MAX) - } else { - // already enabled - self - } - } - - #[doc(hidden)] - #[deprecated(note = "renamed to `pool_idle_timeout`")] - pub fn keep_alive_timeout(&mut self, val: D) -> &mut Self - where - D: Into>, - { - self.pool_idle_timeout(val) - } - - /// Set an optional timeout for idle sockets being kept-alive. - /// - /// Pass `None` to disable timeout. - /// - /// Default is 90 seconds. 
- pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self - where - D: Into>, - { - self.pool_config.idle_timeout = val.into(); - self - } - - #[doc(hidden)] - #[deprecated(note = "renamed to `pool_max_idle_per_host`")] - pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { - self.pool_config.max_idle_per_host = max_idle; - self - } - - /// Sets the maximum idle connection per host allowed in the pool. - /// - /// Default is `usize::MAX` (no limit). - pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { - self.pool_config.max_idle_per_host = max_idle; - self - } - - // HTTP/1 options - - /// Sets the exact size of the read buffer to *always* use. - /// - /// Note that setting this option unsets the `http1_max_buf_size` option. - /// - /// Default is an adaptive read buffer. - pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { - self.conn_builder.h1_read_buf_exact_size(Some(sz)); - self - } - - /// Set the maximum buffer size for the connection. - /// - /// Default is ~400kb. - /// - /// Note that setting this option unsets the `http1_read_exact_buf_size` option. - /// - /// # Panics - /// - /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { - self.conn_builder.h1_max_buf_size(max); - self - } - - /// Set whether HTTP/1 connections will write header names as title case at - /// the socket level. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { - self.conn_builder.h1_title_case_headers(val); - self - } - - /// Set whether the connection **must** use HTTP/2. - /// - /// The destination must either allow HTTP2 Prior Knowledge, or the - /// `Connect` should be configured to do use ALPN to upgrade to `h2` - /// as part of the connection process. This will not make the `Client` - /// utilize ALPN by itself. - /// - /// Note that setting this to true prevents HTTP/1 from being allowed. - /// - /// Default is false. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_only(&mut self, val: bool) -> &mut Self { - self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { - self.conn_builder - .http2_initial_stream_window_size(sz.into()); - self - } - - /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_connection_window_size( - &mut self, - sz: impl Into>, - ) -> &mut Self { - self.conn_builder - .http2_initial_connection_window_size(sz.into()); - self - } - - /// Sets whether to use an adaptive flow control. 
- /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { - self.conn_builder.http2_adaptive_window(enabled); - self - } - - /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { - self.conn_builder.http2_max_frame_size(sz); - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_interval( - &mut self, - interval: impl Into>, - ) -> &mut Self { - self.conn_builder.http2_keep_alive_interval(interval); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { - self.conn_builder.http2_keep_alive_timeout(timeout); - self - } - - /// Sets whether HTTP2 keep-alive should apply while the connection is idle. - /// - /// If disabled, keep-alive pings are only sent while there are open - /// request/responses streams. If enabled, pings are also sent when no - /// streams are active. Does nothing if `http2_keep_alive_interval` is - /// disabled. - /// - /// Default is `false`. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { - self.conn_builder.http2_keep_alive_while_idle(enabled); - self - } - - /// Set whether to retry requests that get disrupted before ever starting - /// to write. - /// - /// This means a request that is queued, and gets given an idle, reused - /// connection, and then encounters an error immediately as the idle - /// connection was found to be unusable. - /// - /// When this is set to `false`, the related `ResponseFuture` would instead - /// resolve to an `Error::Cancel`. - /// - /// Default is `true`. - #[inline] - pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { - self.client_config.retry_canceled_requests = val; - self - } - - /// Set whether to automatically add the `Host` header to requests. - /// - /// If true, and a request does not include a `Host` header, one will be - /// added automatically, derived from the authority of the `Uri`. - /// - /// Default is `true`. 
- #[inline] - pub fn set_host(&mut self, val: bool) -> &mut Self { - self.client_config.set_host = val; - self - } - - /// Provide an executor to execute background `Connection` tasks. - pub fn executor(&mut self, exec: E) -> &mut Self - where - E: Executor + Send + Sync + 'static, - { - self.conn_builder.executor(exec); - self - } - - /// Builder a client with this configuration and the default `HttpConnector`. - #[cfg(feature = "tcp")] - pub fn build_http(&self) -> Client - where - B: HttpBody + Send, - B::Data: Send, - { - let mut connector = HttpConnector::new(); - if self.pool_config.is_enabled() { - connector.set_keepalive(self.pool_config.idle_timeout); - } - self.build(connector) - } - - /// Combine the configuration of this builder with a connector to create a `Client`. - pub fn build(&self, connector: C) -> Client - where - C: Connect + Clone, - B: HttpBody + Send, - B::Data: Send, - { - Client { - config: self.client_config, - conn_builder: self.conn_builder.clone(), - connector, - pool: Pool::new(self.pool_config, &self.conn_builder.exec), - } - } -} - -impl fmt::Debug for Builder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Builder") - .field("client_config", &self.client_config) - .field("conn_builder", &self.conn_builder) - .field("pool_config", &self.pool_config) - .finish() - } -} - -#[cfg(test)] -mod unit_tests { - use super::*; - - #[test] - fn set_relative_uri_with_implicit_path() { - let mut uri = "http://hyper.rs".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/"); - } - - #[test] - fn test_origin_form() { - let mut uri = "http://hyper.rs/guides".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/guides"); - - let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/guides?foo=bar"); - } - - #[test] - fn test_absolute_form() { - let mut uri = "http://hyper.rs/guides".parse().unwrap(); - absolute_form(&mut uri); - assert_eq!(uri.to_string(), "http://hyper.rs/guides"); - - let mut uri = "https://hyper.rs/guides".parse().unwrap(); - absolute_form(&mut uri); - assert_eq!(uri.to_string(), "/guides"); - } - - #[test] - fn test_authority_form() { - let _ = pretty_env_logger::try_init(); - - let mut uri = "http://hyper.rs".parse().unwrap(); - authority_form(&mut uri); - assert_eq!(uri.to_string(), "hyper.rs"); +cfg_feature! 
{ + #![any(feature = "http1", feature = "http2")] - let mut uri = "hyper.rs".parse().unwrap(); - authority_form(&mut uri); - assert_eq!(uri.to_string(), "hyper.rs"); - } + pub use self::client::{Builder, Client, ResponseFuture}; - #[test] - fn test_extract_domain_connect_no_port() { - let mut uri = "hyper.rs".parse().unwrap(); - let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain"); - assert_eq!(scheme, *"http"); - assert_eq!(host, "hyper.rs"); - } + mod client; + pub mod conn; + pub(crate) mod dispatch; + mod pool; + pub mod service; } diff --git a/src/client/pool.rs b/src/client/pool.rs index 26be53544a..bbee0344d5 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -10,7 +10,7 @@ use futures_channel::oneshot; #[cfg(feature = "runtime")] use tokio::time::{Duration, Instant, Interval}; -use super::Ver; +use super::client::Ver; use crate::common::{task, exec::Exec, Future, Pin, Poll, Unpin}; // FIXME: allow() required due to `impl Trait` leaking types to this lint diff --git a/src/lib.rs b/src/lib.rs index ed457ddeb0..275d765ff0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -58,7 +58,11 @@ #[doc(hidden)] pub use http; -#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg(any( + feature = "http1", + feature = "http2", + all(feature = "client", feature = "tcp") +))] #[macro_use] extern crate tracing; @@ -89,9 +93,11 @@ cfg_proto! { } cfg_feature! { - #![all(feature = "client", any(feature = "http1", feature = "http2"))] + #![all(feature = "client")] pub mod client; + #[cfg(any(feature = "http1", feature = "http2"))] + #[doc(no_inline)] pub use crate::client::Client; } diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index b42fc81e3c..85e4c016d9 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -91,6 +91,7 @@ where self.read_buf_strategy = ReadStrategy::Exact(sz); } + #[cfg(feature = "server")] pub fn set_write_strategy_flatten(&mut self) { // this should always be called only at construction time, // so this assert is here to catch myself @@ -475,6 +476,7 @@ impl WriteBuf where B: Buf, { + #[cfg(feature = "server")] fn set_strategy(&mut self, strategy: WriteStrategy) { self.strategy = strategy; } From 510b998ff3dc75e0dfc7e236212f5fef33142ad1 Mon Sep 17 00:00:00 2001 From: Maxim Zhukov Date: Tue, 29 Dec 2020 02:52:28 +0300 Subject: [PATCH 004/420] chore(dev-dependencies): bump url to 2.2 (#2379) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 88a231b12e..4874988bd1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,7 +63,7 @@ tokio = { version = "1", features = [ tokio-test = "0.4" tokio-util = { version = "0.6", features = ["codec"] } tower-util = "0.3" -url = "1.0" +url = "2.2" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies] pnet_datalink = "0.27.2" From a6d4fcbee65bebf461291def75f4c512ec62a664 Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Tue, 29 Dec 2020 21:19:57 +0300 Subject: [PATCH 005/420] feat(server): expose `Accept` without httpX features (#2382) --- src/lib.rs | 4 +- src/server/conn.rs | 2 +- src/server/mod.rs | 453 +---------------------------------------- src/server/server.rs | 444 ++++++++++++++++++++++++++++++++++++++++ src/server/shutdown.rs | 2 +- src/server/tcp.rs | 2 +- 6 files changed, 458 insertions(+), 449 deletions(-) create mode 100644 src/server/server.rs diff --git a/src/lib.rs b/src/lib.rs index 275d765ff0..d37170778f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -102,8 +102,10 @@ cfg_feature! { } cfg_feature! 
{ - #![all(feature = "server", any(feature = "http1", feature = "http2"))] + #![all(feature = "server")] pub mod server; + #[cfg(any(feature = "http1", feature = "http2"))] + #[doc(no_inline)] pub use crate::server::Server; } diff --git a/src/server/conn.rs b/src/server/conn.rs index 9a10f2dccc..b94b5054b9 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -57,7 +57,7 @@ use bytes::Bytes; use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; -use super::Accept; +use super::accept::Accept; use crate::body::{Body, HttpBody}; use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; #[cfg(feature = "http2")] diff --git a/src/server/mod.rs b/src/server/mod.rs index 9e8df1c899..fac33e06d6 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -52,452 +52,15 @@ //! ``` pub mod accept; -pub mod conn; -mod shutdown; -#[cfg(feature = "tcp")] -mod tcp; -use std::error::Error as StdError; -use std::fmt; -#[cfg(feature = "tcp")] -use std::net::{SocketAddr, TcpListener as StdTcpListener}; +cfg_feature! { + #![any(feature = "http1", feature = "http2")] -#[cfg(feature = "tcp")] -use std::time::Duration; + pub use self::server::{Builder, Server}; -use pin_project::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; - -use self::accept::Accept; -use crate::body::{Body, HttpBody}; -use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; -use crate::common::{task, Future, Pin, Poll, Unpin}; -use crate::service::{HttpService, MakeServiceRef}; -// Renamed `Http` as `Http_` for now so that people upgrading don't see an -// error that `hyper::server::Http` is private... -use self::conn::{Http as Http_, NoopWatcher, SpawnAll}; -use self::shutdown::{Graceful, GracefulWatcher}; -#[cfg(feature = "tcp")] -use self::tcp::AddrIncoming; - -/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. -/// -/// `Server` is a `Future` mapping a bound listener with a set of service -/// handlers. It is built using the [`Builder`](Builder), and the future -/// completes when the server has been shutdown. It should be run by an -/// `Executor`. -#[pin_project] -pub struct Server { - #[pin] - spawn_all: SpawnAll, -} - -/// A builder for a [`Server`](Server). -#[derive(Debug)] -pub struct Builder { - incoming: I, - protocol: Http_, -} - -// ===== impl Server ===== - -impl Server { - /// Starts a [`Builder`](Builder) with the provided incoming stream. - pub fn builder(incoming: I) -> Builder { - Builder { - incoming, - protocol: Http_::new(), - } - } -} - -#[cfg(feature = "tcp")] -impl Server { - /// Binds to the provided address, and returns a [`Builder`](Builder). - /// - /// # Panics - /// - /// This method will panic if binding to the address fails. For a method - /// to bind to an address and return a `Result`, see `Server::try_bind`. - pub fn bind(addr: &SocketAddr) -> Builder { - let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { - panic!("error binding to {}: {}", addr, e); - }); - Server::builder(incoming) - } - - /// Tries to bind to the provided address, and returns a [`Builder`](Builder). - pub fn try_bind(addr: &SocketAddr) -> crate::Result> { - AddrIncoming::new(addr).map(Server::builder) - } - - /// Create a new instance from a `std::net::TcpListener` instance. - pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { - AddrIncoming::from_std(listener).map(Server::builder) - } -} - -#[cfg(feature = "tcp")] -impl Server { - /// Returns the local address that this server is bound to. 
- pub fn local_addr(&self) -> SocketAddr { - self.spawn_all.local_addr() - } -} - -impl Server -where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + Send + Sync + 'static, - B::Error: Into>, - E: ConnStreamExec<>::Future, B>, - E: NewSvcExec, -{ - /// Prepares a server to handle graceful shutdown when the provided future - /// completes. - /// - /// # Example - /// - /// ``` - /// # fn main() {} - /// # #[cfg(feature = "tcp")] - /// # async fn run() { - /// # use hyper::{Body, Response, Server, Error}; - /// # use hyper::service::{make_service_fn, service_fn}; - /// # let make_service = make_service_fn(|_| async { - /// # Ok::<_, Error>(service_fn(|_req| async { - /// # Ok::<_, Error>(Response::new(Body::from("Hello World"))) - /// # })) - /// # }); - /// // Make a server from the previous examples... - /// let server = Server::bind(&([127, 0, 0, 1], 3000).into()) - /// .serve(make_service); - /// - /// // Prepare some signal for when the server should start shutting down... - /// let (tx, rx) = tokio::sync::oneshot::channel::<()>(); - /// let graceful = server - /// .with_graceful_shutdown(async { - /// rx.await.ok(); - /// }); - /// - /// // Await the `server` receiving the signal... - /// if let Err(e) = graceful.await { - /// eprintln!("server error: {}", e); - /// } - /// - /// // And later, trigger the signal by calling `tx.send(())`. - /// let _ = tx.send(()); - /// # } - /// ``` - pub fn with_graceful_shutdown(self, signal: F) -> Graceful - where - F: Future, - { - Graceful::new(self.spawn_all, signal) - } -} - -impl Future for Server -where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec<>::Future, B>, - E: NewSvcExec, -{ - type Output = crate::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.project().spawn_all.poll_watch(cx, &NoopWatcher) - } -} - -impl fmt::Debug for Server { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Server") - .field("listener", &self.spawn_all.incoming_ref()) - .finish() - } -} - -// ===== impl Builder ===== - -impl Builder { - /// Start a new builder, wrapping an incoming stream and low-level options. - /// - /// For a more convenient constructor, see [`Server::bind`](Server::bind). - pub fn new(incoming: I, protocol: Http_) -> Self { - Builder { incoming, protocol } - } - - /// Sets whether to use keep-alive for HTTP/1 connections. - /// - /// Default is `true`. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_keepalive(mut self, val: bool) -> Self { - self.protocol.http1_keep_alive(val); - self - } - - /// Set whether HTTP/1 connections should support half-closures. - /// - /// Clients can chose to shutdown their write-side while waiting - /// for the server to respond. Setting this to `true` will - /// prevent closing the connection immediately if `read` - /// detects an EOF in the middle of a request. - /// - /// Default is `false`. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_half_close(mut self, val: bool) -> Self { - self.protocol.http1_half_close(val); - self - } - - /// Set the maximum buffer size. - /// - /// Default is ~ 400kb. 
- #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_max_buf_size(mut self, val: usize) -> Self { - self.protocol.max_buf_size(val); - self - } - - // Sets whether to bunch up HTTP/1 writes until the read buffer is empty. - // - // This isn't really desirable in most cases, only really being useful in - // silly pipeline benchmarks. - #[doc(hidden)] - #[cfg(feature = "http1")] - pub fn http1_pipeline_flush(mut self, val: bool) -> Self { - self.protocol.pipeline_flush(val); - self - } - - /// Sets whether HTTP/1 is required. - /// - /// Default is `false`. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_only(mut self, val: bool) -> Self { - self.protocol.http1_only(val); - self - } - - /// Sets whether HTTP/2 is required. - /// - /// Default is `false`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_only(mut self, val: bool) -> Self { - self.protocol.http2_only(val); - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_stream_window_size(mut self, sz: impl Into>) -> Self { - self.protocol.http2_initial_stream_window_size(sz.into()); - self - } - - /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_connection_window_size(mut self, sz: impl Into>) -> Self { - self.protocol - .http2_initial_connection_window_size(sz.into()); - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_adaptive_window(mut self, enabled: bool) -> Self { - self.protocol.http2_adaptive_window(enabled); - self - } - - /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_frame_size(mut self, sz: impl Into>) -> Self { - self.protocol.http2_max_frame_size(sz); - self - } - - /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 - /// connections. - /// - /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_concurrent_streams(mut self, max: impl Into>) -> Self { - self.protocol.http2_max_concurrent_streams(max.into()); - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. 
- #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_interval(mut self, interval: impl Into>) -> Self { - self.protocol.http2_keep_alive_interval(interval); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_timeout(mut self, timeout: Duration) -> Self { - self.protocol.http2_keep_alive_timeout(timeout); - self - } - - /// Sets the `Executor` to deal with connection tasks. - /// - /// Default is `tokio::spawn`. - pub fn executor(self, executor: E2) -> Builder { - Builder { - incoming: self.incoming, - protocol: self.protocol.with_executor(executor), - } - } - - /// Consume this `Builder`, creating a [`Server`](Server). - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "tcp")] - /// # async fn run() { - /// use hyper::{Body, Error, Response, Server}; - /// use hyper::service::{make_service_fn, service_fn}; - /// - /// // Construct our SocketAddr to listen on... - /// let addr = ([127, 0, 0, 1], 3000).into(); - /// - /// // And a MakeService to handle each connection... - /// let make_svc = make_service_fn(|_| async { - /// Ok::<_, Error>(service_fn(|_req| async { - /// Ok::<_, Error>(Response::new(Body::from("Hello World"))) - /// })) - /// }); - /// - /// // Then bind and serve... - /// let server = Server::bind(&addr) - /// .serve(make_svc); - /// - /// // Run forever-ish... - /// if let Err(err) = server.await { - /// eprintln!("server error: {}", err); - /// } - /// # } - /// ``` - pub fn serve(self, new_service: S) -> Server - where - I: Accept, - I::Error: Into>, - I::Conn: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - E: NewSvcExec, - E: ConnStreamExec<>::Future, B>, - { - let serve = self.protocol.serve(self.incoming, new_service); - let spawn_all = serve.spawn_all(); - Server { spawn_all } - } -} - -#[cfg(feature = "tcp")] -impl Builder { - /// Set whether TCP keepalive messages are enabled on accepted connections. - /// - /// If `None` is specified, keepalive is disabled, otherwise the duration - /// specified will be the time to remain idle before sending TCP keepalive - /// probes. - pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { - self.incoming.set_keepalive(keepalive); - self - } - - /// Set the value of `TCP_NODELAY` option for accepted connections. - pub fn tcp_nodelay(mut self, enabled: bool) -> Self { - self.incoming.set_nodelay(enabled); - self - } - - /// Set whether to sleep on accept errors. - /// - /// A possible scenario is that the process has hit the max open files - /// allowed, and so trying to accept a new connection will fail with - /// EMFILE. In some cases, it's preferable to just wait for some time, if - /// the application will likely close some files (or connections), and try - /// to accept the connection again. If this option is true, the error will - /// be logged at the error level, since it is still a big deal, and then - /// the listener will sleep for 1 second. 
- /// - /// In other cases, hitting the max open files should be treat similarly - /// to being out-of-memory, and simply error (and shutdown). Setting this - /// option to false will allow that. - /// - /// For more details see [`AddrIncoming::set_sleep_on_errors`] - pub fn tcp_sleep_on_accept_errors(mut self, val: bool) -> Self { - self.incoming.set_sleep_on_errors(val); - self - } + pub mod conn; + mod server; + mod shutdown; + #[cfg(feature = "tcp")] + mod tcp; } diff --git a/src/server/server.rs b/src/server/server.rs new file mode 100644 index 0000000000..24037a9014 --- /dev/null +++ b/src/server/server.rs @@ -0,0 +1,444 @@ +use std::error::Error as StdError; +use std::fmt; +#[cfg(feature = "tcp")] +use std::net::{SocketAddr, TcpListener as StdTcpListener}; + +#[cfg(feature = "tcp")] +use std::time::Duration; + +use pin_project::pin_project; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::accept::Accept; +use crate::body::{Body, HttpBody}; +use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; +use crate::common::{task, Future, Pin, Poll, Unpin}; +use crate::service::{HttpService, MakeServiceRef}; +// Renamed `Http` as `Http_` for now so that people upgrading don't see an +// error that `hyper::server::Http` is private... +use super::conn::{Http as Http_, NoopWatcher, SpawnAll}; +use super::shutdown::{Graceful, GracefulWatcher}; +#[cfg(feature = "tcp")] +use super::tcp::AddrIncoming; + +/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. +/// +/// `Server` is a `Future` mapping a bound listener with a set of service +/// handlers. It is built using the [`Builder`](Builder), and the future +/// completes when the server has been shutdown. It should be run by an +/// `Executor`. +#[pin_project] +pub struct Server { + #[pin] + spawn_all: SpawnAll, +} + +/// A builder for a [`Server`](Server). +#[derive(Debug)] +pub struct Builder { + incoming: I, + protocol: Http_, +} + +// ===== impl Server ===== + +impl Server { + /// Starts a [`Builder`](Builder) with the provided incoming stream. + pub fn builder(incoming: I) -> Builder { + Builder { + incoming, + protocol: Http_::new(), + } + } +} + +#[cfg(feature = "tcp")] +impl Server { + /// Binds to the provided address, and returns a [`Builder`](Builder). + /// + /// # Panics + /// + /// This method will panic if binding to the address fails. For a method + /// to bind to an address and return a `Result`, see `Server::try_bind`. + pub fn bind(addr: &SocketAddr) -> Builder { + let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { + panic!("error binding to {}: {}", addr, e); + }); + Server::builder(incoming) + } + + /// Tries to bind to the provided address, and returns a [`Builder`](Builder). + pub fn try_bind(addr: &SocketAddr) -> crate::Result> { + AddrIncoming::new(addr).map(Server::builder) + } + + /// Create a new instance from a `std::net::TcpListener` instance. + pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { + AddrIncoming::from_std(listener).map(Server::builder) + } +} + +#[cfg(feature = "tcp")] +impl Server { + /// Returns the local address that this server is bound to. 
+ pub fn local_addr(&self) -> SocketAddr { + self.spawn_all.local_addr() + } +} + +impl Server +where + I: Accept, + IE: Into>, + IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, + S: MakeServiceRef, + S::Error: Into>, + B: HttpBody + Send + Sync + 'static, + B::Error: Into>, + E: ConnStreamExec<>::Future, B>, + E: NewSvcExec, +{ + /// Prepares a server to handle graceful shutdown when the provided future + /// completes. + /// + /// # Example + /// + /// ``` + /// # fn main() {} + /// # #[cfg(feature = "tcp")] + /// # async fn run() { + /// # use hyper::{Body, Response, Server, Error}; + /// # use hyper::service::{make_service_fn, service_fn}; + /// # let make_service = make_service_fn(|_| async { + /// # Ok::<_, Error>(service_fn(|_req| async { + /// # Ok::<_, Error>(Response::new(Body::from("Hello World"))) + /// # })) + /// # }); + /// // Make a server from the previous examples... + /// let server = Server::bind(&([127, 0, 0, 1], 3000).into()) + /// .serve(make_service); + /// + /// // Prepare some signal for when the server should start shutting down... + /// let (tx, rx) = tokio::sync::oneshot::channel::<()>(); + /// let graceful = server + /// .with_graceful_shutdown(async { + /// rx.await.ok(); + /// }); + /// + /// // Await the `server` receiving the signal... + /// if let Err(e) = graceful.await { + /// eprintln!("server error: {}", e); + /// } + /// + /// // And later, trigger the signal by calling `tx.send(())`. + /// let _ = tx.send(()); + /// # } + /// ``` + pub fn with_graceful_shutdown(self, signal: F) -> Graceful + where + F: Future, + { + Graceful::new(self.spawn_all, signal) + } +} + +impl Future for Server +where + I: Accept, + IE: Into>, + IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, + S: MakeServiceRef, + S::Error: Into>, + B: HttpBody + 'static, + B::Error: Into>, + E: ConnStreamExec<>::Future, B>, + E: NewSvcExec, +{ + type Output = crate::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + self.project().spawn_all.poll_watch(cx, &NoopWatcher) + } +} + +impl fmt::Debug for Server { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Server") + .field("listener", &self.spawn_all.incoming_ref()) + .finish() + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Start a new builder, wrapping an incoming stream and low-level options. + /// + /// For a more convenient constructor, see [`Server::bind`](Server::bind). + pub fn new(incoming: I, protocol: Http_) -> Self { + Builder { incoming, protocol } + } + + /// Sets whether to use keep-alive for HTTP/1 connections. + /// + /// Default is `true`. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_keepalive(mut self, val: bool) -> Self { + self.protocol.http1_keep_alive(val); + self + } + + /// Set whether HTTP/1 connections should support half-closures. + /// + /// Clients can chose to shutdown their write-side while waiting + /// for the server to respond. Setting this to `true` will + /// prevent closing the connection immediately if `read` + /// detects an EOF in the middle of a request. + /// + /// Default is `false`. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_half_close(mut self, val: bool) -> Self { + self.protocol.http1_half_close(val); + self + } + + /// Set the maximum buffer size. + /// + /// Default is ~ 400kb. 
+ #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_buf_size(mut self, val: usize) -> Self { + self.protocol.max_buf_size(val); + self + } + + // Sets whether to bunch up HTTP/1 writes until the read buffer is empty. + // + // This isn't really desirable in most cases, only really being useful in + // silly pipeline benchmarks. + #[doc(hidden)] + #[cfg(feature = "http1")] + pub fn http1_pipeline_flush(mut self, val: bool) -> Self { + self.protocol.pipeline_flush(val); + self + } + + /// Sets whether HTTP/1 is required. + /// + /// Default is `false`. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_only(mut self, val: bool) -> Self { + self.protocol.http1_only(val); + self + } + + /// Sets whether HTTP/2 is required. + /// + /// Default is `false`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_only(mut self, val: bool) -> Self { + self.protocol.http2_only(val); + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_stream_window_size(mut self, sz: impl Into>) -> Self { + self.protocol.http2_initial_stream_window_size(sz.into()); + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_connection_window_size(mut self, sz: impl Into>) -> Self { + self.protocol + .http2_initial_connection_window_size(sz.into()); + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `http2_initial_stream_window_size` and + /// `http2_initial_connection_window_size`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_adaptive_window(mut self, enabled: bool) -> Self { + self.protocol.http2_adaptive_window(enabled); + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_frame_size(mut self, sz: impl Into>) -> Self { + self.protocol.http2_max_frame_size(sz); + self + } + + /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 + /// connections. + /// + /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_concurrent_streams(mut self, max: impl Into>) -> Self { + self.protocol.http2_max_concurrent_streams(max.into()); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. 
+ #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_interval(mut self, interval: impl Into>) -> Self { + self.protocol.http2_keep_alive_interval(interval); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_timeout(mut self, timeout: Duration) -> Self { + self.protocol.http2_keep_alive_timeout(timeout); + self + } + + /// Sets the `Executor` to deal with connection tasks. + /// + /// Default is `tokio::spawn`. + pub fn executor(self, executor: E2) -> Builder { + Builder { + incoming: self.incoming, + protocol: self.protocol.with_executor(executor), + } + } + + /// Consume this `Builder`, creating a [`Server`](Server). + /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "tcp")] + /// # async fn run() { + /// use hyper::{Body, Error, Response, Server}; + /// use hyper::service::{make_service_fn, service_fn}; + /// + /// // Construct our SocketAddr to listen on... + /// let addr = ([127, 0, 0, 1], 3000).into(); + /// + /// // And a MakeService to handle each connection... + /// let make_svc = make_service_fn(|_| async { + /// Ok::<_, Error>(service_fn(|_req| async { + /// Ok::<_, Error>(Response::new(Body::from("Hello World"))) + /// })) + /// }); + /// + /// // Then bind and serve... + /// let server = Server::bind(&addr) + /// .serve(make_svc); + /// + /// // Run forever-ish... + /// if let Err(err) = server.await { + /// eprintln!("server error: {}", err); + /// } + /// # } + /// ``` + pub fn serve(self, new_service: S) -> Server + where + I: Accept, + I::Error: Into>, + I::Conn: AsyncRead + AsyncWrite + Unpin + Send + 'static, + S: MakeServiceRef, + S::Error: Into>, + B: HttpBody + 'static, + B::Error: Into>, + E: NewSvcExec, + E: ConnStreamExec<>::Future, B>, + { + let serve = self.protocol.serve(self.incoming, new_service); + let spawn_all = serve.spawn_all(); + Server { spawn_all } + } +} + +#[cfg(feature = "tcp")] +impl Builder { + /// Set whether TCP keepalive messages are enabled on accepted connections. + /// + /// If `None` is specified, keepalive is disabled, otherwise the duration + /// specified will be the time to remain idle before sending TCP keepalive + /// probes. + pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { + self.incoming.set_keepalive(keepalive); + self + } + + /// Set the value of `TCP_NODELAY` option for accepted connections. + pub fn tcp_nodelay(mut self, enabled: bool) -> Self { + self.incoming.set_nodelay(enabled); + self + } + + /// Set whether to sleep on accept errors. + /// + /// A possible scenario is that the process has hit the max open files + /// allowed, and so trying to accept a new connection will fail with + /// EMFILE. In some cases, it's preferable to just wait for some time, if + /// the application will likely close some files (or connections), and try + /// to accept the connection again. If this option is true, the error will + /// be logged at the error level, since it is still a big deal, and then + /// the listener will sleep for 1 second. 
+ /// + /// In other cases, hitting the max open files should be treat similarly + /// to being out-of-memory, and simply error (and shutdown). Setting this + /// option to false will allow that. + /// + /// For more details see [`AddrIncoming::set_sleep_on_errors`] + pub fn tcp_sleep_on_accept_errors(mut self, val: bool) -> Self { + self.incoming.set_sleep_on_errors(val); + self + } +} diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs index 8f1a45ae05..e54ba42104 100644 --- a/src/server/shutdown.rs +++ b/src/server/shutdown.rs @@ -4,7 +4,7 @@ use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::conn::{SpawnAll, UpgradeableConnection, Watcher}; -use super::Accept; +use super::accept::Accept; use crate::body::{Body, HttpBody}; use crate::common::drain::{self, Draining, Signal, Watch, Watching}; use crate::common::exec::{ConnStreamExec, NewSvcExec}; diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 52d68e62b4..c6cfc98937 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -9,7 +9,7 @@ use tokio::time::Sleep; use crate::common::{task, Future, Pin, Poll}; pub use self::addr_stream::AddrStream; -use super::Accept; +use super::accept::Accept; /// A stream of connections from binding to an address. #[must_use = "streams do nothing unless polled"] From 8861f9a7867216c81ea14ac6224c11a1303e7761 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 29 Dec 2020 10:51:50 -0800 Subject: [PATCH 006/420] v0.14.2 --- CHANGELOG.md | 9 +++++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29d29b39c6..74306dd2d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +### v0.14.2 (2020-12-29) + + +#### Features + +* **client:** expose `connect` types without proto feature (#2377) ([73a59e5f](https://github.com/hyperium/hyper/commit/73a59e5fc7ddedcb7cbd91e97b33385fde57aa10)) +* **server:** expose `Accept` without httpX features (#2382) ([a6d4fcbe](https://github.com/hyperium/hyper/commit/a6d4fcbee65bebf461291def75f4c512ec62a664)) + + ### v0.14.1 (2020-12-23) * Fixes building documentation. diff --git a/Cargo.toml b/Cargo.toml index 4874988bd1..50b1463da5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.1" # don't forget to update html_root_url +version = "0.14.2" # don't forget to update html_root_url description = "A fast and correct HTTP library." 
readme = "README.md" homepage = "https://hyper.rs" diff --git a/src/lib.rs b/src/lib.rs index d37170778f..7268725e5f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,4 @@ -#![doc(html_root_url = "https://docs.rs/hyper/0.14.1")] +#![doc(html_root_url = "https://docs.rs/hyper/0.14.2")] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] From 3ae1581a539b67363bd87d9d8fc8635a204eec5d Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 7 Jan 2021 17:22:12 -0800 Subject: [PATCH 007/420] feat(ffi): Initial C API for hyper --- .github/workflows/CI.yml | 49 +++- .gitignore | 4 +- Cargo.toml | 8 +- capi/README.md | 17 ++ capi/cbindgen.toml | 14 + capi/examples/Makefile | 22 ++ capi/examples/client.c | 343 ++++++++++++++++++++++++ capi/examples/upload.c | 386 +++++++++++++++++++++++++++ capi/gen_header.sh | 72 +++++ capi/include/hyper.h | 554 +++++++++++++++++++++++++++++++++++++++ src/body/body.rs | 28 ++ src/error.rs | 14 +- src/ffi/body.rs | 233 ++++++++++++++++ src/ffi/client.rs | 148 +++++++++++ src/ffi/error.rs | 80 ++++++ src/ffi/http_types.rs | 267 +++++++++++++++++++ src/ffi/io.rs | 173 ++++++++++++ src/ffi/macros.rs | 23 ++ src/ffi/mod.rs | 55 ++++ src/ffi/task.rs | 415 +++++++++++++++++++++++++++++ src/lib.rs | 3 + src/proto/h1/dispatch.rs | 16 +- 22 files changed, 2910 insertions(+), 14 deletions(-) create mode 100644 capi/README.md create mode 100644 capi/cbindgen.toml create mode 100644 capi/examples/Makefile create mode 100644 capi/examples/client.c create mode 100644 capi/examples/upload.c create mode 100755 capi/gen_header.sh create mode 100644 capi/include/hyper.h create mode 100644 src/ffi/body.rs create mode 100644 src/ffi/client.rs create mode 100644 src/ffi/error.rs create mode 100644 src/ffi/http_types.rs create mode 100644 src/ffi/io.rs create mode 100644 src/ffi/macros.rs create mode 100644 src/ffi/mod.rs create mode 100644 src/ffi/task.rs diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 7aeceb3f10..ca864d38a6 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -16,6 +16,7 @@ jobs: - style - test - features + - ffi - doc steps: - run: exit 0 @@ -111,7 +112,53 @@ jobs: run: cargo install cargo-hack - name: check --each-feature - run: cargo hack check --each-feature -Z avoid-dev-deps + run: cargo hack check --each-feature --skip ffi -Z avoid-dev-deps + + ffi: + name: Test C API (FFI) + needs: [style] + + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v1 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Install cbindgen + uses: actions-rs/cargo@v1 + with: + command: install + args: cbindgen + + - name: Build FFI + uses: actions-rs/cargo@v1 + env: + RUSTFLAGS: --cfg hyper_unstable_ffi + with: + command: build + args: --features client,http1,http2,ffi + + # TODO: re-enable check once figuring out how to get it working in CI + # - name: Verify cbindgen + # run: ./capi/gen_header.sh --verify + + - name: Make Examples + run: cd capi/examples && make client + + - name: Run FFI unit tests + uses: actions-rs/cargo@v1 + env: + RUSTFLAGS: --cfg hyper_unstable_ffi + with: + command: test + args: --features full,ffi --lib doc: name: Build docs diff --git a/.gitignore b/.gitignore index 4fffb2f89c..a9d37c560c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -/target -/Cargo.lock +target +Cargo.lock diff --git a/Cargo.toml b/Cargo.toml index 50b1463da5..f5357ed5ba 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,9 @@ include = [ #"build.rs", ] +[lib] +crate-type = ["lib", "staticlib", "cdylib"] + [dependencies] bytes = "1" futures-core = { version = "0.3", default-features = false } @@ -38,6 +41,7 @@ want = "0.3" # Optional +libc = { version = "0.2", optional = true } socket2 = { version = "0.3.16", optional = true } [dev-dependencies] @@ -94,7 +98,6 @@ server = [] stream = [] # Tokio support - runtime = [ "tcp", "tokio/rt", @@ -106,6 +109,9 @@ tcp = [ "tokio/time", ] +# C-API support (currently unstable (no semver)) +ffi = ["libc"] + # internal features used in CI nightly = [] __internal_happy_eyeballs_tests = [] diff --git a/capi/README.md b/capi/README.md new file mode 100644 index 0000000000..9d6f9f6d14 --- /dev/null +++ b/capi/README.md @@ -0,0 +1,17 @@ +# C API for hyper + +This provides auxiliary pieces for a C API to use the hyper library. + +## Unstable + +The C API of hyper is currently **unstable**, which means it's not part of the semver contract as the rest of the Rust API is. + +Because of that, it's only accessible if `--cfg hyper_unstable_ffi` is passed to `rustc` when compiling. The easiest way to do that is setting the `RUSTFLAGS` environment variable. + +## Building + +The C API is part of the Rust library, but isn't compiled by default. Using `cargo`, it can be compiled with the following command: + +``` +RUSTFLAGS="--cfg hyper_unstable_ffi" cargo build --features client,http1,http2,ffi +``` diff --git a/capi/cbindgen.toml b/capi/cbindgen.toml new file mode 100644 index 0000000000..fd611e18c4 --- /dev/null +++ b/capi/cbindgen.toml @@ -0,0 +1,14 @@ +language = "C" +include_guard = "_HYPER_H" +no_includes = true +sys_includes = ["stdint.h", "stddef.h"] +cpp_compat = true +documentation_style = "c" + +[parse.expand] +crates = ["hyper-capi"] + +[export.rename] +"Exec" = "hyper_executor" +"Io" = "hyper_io" +"Task" = "hyper_task" diff --git a/capi/examples/Makefile b/capi/examples/Makefile new file mode 100644 index 0000000000..6cc0a69575 --- /dev/null +++ b/capi/examples/Makefile @@ -0,0 +1,22 @@ +# +# Build the example client +# + +TARGET = client + +OBJS = client.o + +RPATH=$(PWD)/../../target/debug +CFLAGS = -I../include +LDFLAGS = -L$(RPATH) -Wl,-rpath,$(RPATH) +LIBS = -lhyper + +$(TARGET): $(OBJS) + $(CC) -o $(TARGET) $(OBJS) $(LDFLAGS) $(LIBS) + +upload: upload.o + $(CC) -o upload upload.o $(LDFLAGS) $(LIBS) + +clean: + rm -f $(OBJS) $(TARGET) + rm -f upload upload.o diff --git a/capi/examples/client.c b/capi/examples/client.c new file mode 100644 index 0000000000..6ed66a46db --- /dev/null +++ b/capi/examples/client.c @@ -0,0 +1,343 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "hyper.h" + + +struct conn_data { + int fd; + hyper_waker *read_waker; + hyper_waker *write_waker; +}; + +static size_t read_cb(void *userdata, hyper_context *ctx, uint8_t *buf, size_t buf_len) { + struct conn_data *conn = (struct conn_data *)userdata; + ssize_t ret = read(conn->fd, buf, buf_len); + + if (ret < 0) { + int err = errno; + if (err == EAGAIN) { + // would block, register interest + if (conn->read_waker != NULL) { + hyper_waker_free(conn->read_waker); + } + conn->read_waker = hyper_context_waker(ctx); + return HYPER_IO_PENDING; + } else { + // kaboom + return HYPER_IO_ERROR; + } + } else { + return ret; + } +} + +static size_t write_cb(void *userdata, hyper_context *ctx, const uint8_t *buf, size_t buf_len) { + struct conn_data *conn = (struct conn_data *)userdata; + 
ssize_t ret = write(conn->fd, buf, buf_len); + + if (ret < 0) { + int err = errno; + if (err == EAGAIN) { + // would block, register interest + if (conn->write_waker != NULL) { + hyper_waker_free(conn->write_waker); + } + conn->write_waker = hyper_context_waker(ctx); + return HYPER_IO_PENDING; + } else { + // kaboom + return HYPER_IO_ERROR; + } + } else { + return ret; + } +} + +static void free_conn_data(struct conn_data *conn) { + if (conn->read_waker) { + hyper_waker_free(conn->read_waker); + conn->read_waker = NULL; + } + if (conn->write_waker) { + hyper_waker_free(conn->write_waker); + conn->write_waker = NULL; + } + + free(conn); +} + +static int connect_to(const char *host, const char *port) { + struct addrinfo hints; + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + + struct addrinfo *result, *rp; + if (getaddrinfo(host, port, &hints, &result) != 0) { + printf("dns failed for %s\n", host); + return -1; + } + + int sfd; + for (rp = result; rp != NULL; rp = rp->ai_next) { + sfd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); + if (sfd == -1) { + continue; + } + + if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1) { + break; + } else { + close(sfd); + } + } + + freeaddrinfo(result); + + // no address succeeded + if (rp == NULL) { + printf("connect failed for %s\n", host); + return -1; + } + + return sfd; +} + +static int print_each_header(void *userdata, + const uint8_t *name, + size_t name_len, + const uint8_t *value, + size_t value_len) { + printf("%.*s: %.*s\n", (int) name_len, name, (int) value_len, value); + return HYPER_ITER_CONTINUE; +} + +static int print_each_chunk(void *userdata, const hyper_buf *chunk) { + const uint8_t *buf = hyper_buf_bytes(chunk); + size_t len = hyper_buf_len(chunk); + + write(1, buf, len); + + return HYPER_ITER_CONTINUE; +} + +typedef enum { + EXAMPLE_NOT_SET = 0, // tasks we don't know about won't have a userdata set + EXAMPLE_HANDSHAKE, + EXAMPLE_SEND, + EXAMPLE_RESP_BODY +} example_id; + +#define STR_ARG(XX) (uint8_t *)XX, strlen(XX) + +int main(int argc, char *argv[]) { + const char *host = argc > 1 ? argv[1] : "httpbin.org"; + const char *port = argc > 2 ? argv[2] : "80"; + const char *path = argc > 3 ? argv[3] : "/"; + printf("connecting to port %s on %s...\n", port, host); + + int fd = connect_to(host, port); + if (fd < 0) { + return 1; + } + printf("connected to %s, now get %s\n", host, path); + + if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) { + printf("failed to set socket to non-blocking\n"); + return 1; + } + + fd_set fds_read; + fd_set fds_write; + fd_set fds_excep; + + struct conn_data *conn = malloc(sizeof(struct conn_data)); + + conn->fd = fd; + conn->read_waker = NULL; + conn->write_waker = NULL; + + + // Hookup the IO + hyper_io *io = hyper_io_new(); + hyper_io_set_userdata(io, (void *)conn); + hyper_io_set_read(io, read_cb); + hyper_io_set_write(io, write_cb); + + printf("http handshake ...\n"); + + // We need an executor generally to poll futures + const hyper_executor *exec = hyper_executor_new(); + + // Prepare client options + hyper_clientconn_options *opts = hyper_clientconn_options_new(); + hyper_clientconn_options_exec(opts, exec); + + hyper_task *handshake = hyper_clientconn_handshake(io, opts); + hyper_task_set_userdata(handshake, (void *)EXAMPLE_HANDSHAKE); + + // Let's wait for the handshake to finish... + hyper_executor_push(exec, handshake); + + // In case a task errors... + hyper_error *err; + + // The polling state machine! 
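+    //
+    // Each turn of the outer loop below does two things: first it drains the
+    // executor (hyper_executor_poll() hands back ready tasks until there are
+    // none left) and advances this example's state machine based on each
+    // task's userdata; then it blocks in select() on the socket, but only
+    // for the directions hyper parked a waker for in read_cb/write_cb.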
+ while (1) { + // Poll all ready tasks and act on them... + while (1) { + hyper_task *task = hyper_executor_poll(exec); + if (!task) { + break; + } + switch ((example_id) hyper_task_userdata(task)) { + case EXAMPLE_HANDSHAKE: + ; + if (hyper_task_type(task) == HYPER_TASK_ERROR) { + printf("handshake error!\n"); + err = hyper_task_value(task); + goto fail; + } + assert(hyper_task_type(task) == HYPER_TASK_CLIENTCONN); + + printf("preparing http request ...\n"); + + hyper_clientconn *client = hyper_task_value(task); + hyper_task_free(task); + + // Prepare the request + hyper_request *req = hyper_request_new(); + if (hyper_request_set_method(req, STR_ARG("GET"))) { + printf("error setting method\n"); + return 1; + } + if (hyper_request_set_uri(req, STR_ARG(path))) { + printf("error setting uri\n"); + return 1; + } + + hyper_headers *req_headers = hyper_request_headers(req); + hyper_headers_set(req_headers, STR_ARG("host"), STR_ARG(host)); + + // Send it! + hyper_task *send = hyper_clientconn_send(client, req); + hyper_task_set_userdata(send, (void *)EXAMPLE_SEND); + printf("sending ...\n"); + hyper_executor_push(exec, send); + + // For this example, no longer need the client + hyper_clientconn_free(client); + + break; + case EXAMPLE_SEND: + ; + if (hyper_task_type(task) == HYPER_TASK_ERROR) { + printf("send error!\n"); + err = hyper_task_value(task); + goto fail; + } + assert(hyper_task_type(task) == HYPER_TASK_RESPONSE); + + // Take the results + hyper_response *resp = hyper_task_value(task); + hyper_task_free(task); + + uint16_t http_status = hyper_response_status(resp); + + printf("\nResponse Status: %d\n", http_status); + + hyper_headers *headers = hyper_response_headers(resp); + hyper_headers_foreach(headers, print_each_header, NULL); + printf("\n"); + + hyper_body *resp_body = hyper_response_body(resp); + hyper_task *foreach = hyper_body_foreach(resp_body, print_each_chunk, NULL); + hyper_task_set_userdata(foreach, (void *)EXAMPLE_RESP_BODY); + hyper_executor_push(exec, foreach); + + // No longer need the response + hyper_response_free(resp); + + break; + case EXAMPLE_RESP_BODY: + ; + if (hyper_task_type(task) == HYPER_TASK_ERROR) { + printf("body error!\n"); + err = hyper_task_value(task); + goto fail; + } + + assert(hyper_task_type(task) == HYPER_TASK_EMPTY); + + printf("\n -- Done! -- \n"); + + // Cleaning up before exiting + hyper_task_free(task); + hyper_executor_free(exec); + free_conn_data(conn); + + return 0; + case EXAMPLE_NOT_SET: + // A background task for hyper completed... + hyper_task_free(task); + break; + } + } + + // All futures are pending on IO work, so select on the fds. 
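+        //
+        // Only watch a direction that hyper is actually waiting on: read_cb
+        // and write_cb stash a waker whenever the socket returns EAGAIN, and
+        // waking that waker below is what lets the executor retry the
+        // corresponding callback on the next hyper_executor_poll().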
+ + FD_ZERO(&fds_read); + FD_ZERO(&fds_write); + FD_ZERO(&fds_excep); + + if (conn->read_waker) { + FD_SET(conn->fd, &fds_read); + } + if (conn->write_waker) { + FD_SET(conn->fd, &fds_write); + } + + int sel_ret = select(conn->fd + 1, &fds_read, &fds_write, &fds_excep, NULL); + + if (sel_ret < 0) { + printf("select() error\n"); + return 1; + } else { + if (FD_ISSET(conn->fd, &fds_read)) { + hyper_waker_wake(conn->read_waker); + conn->read_waker = NULL; + } + if (FD_ISSET(conn->fd, &fds_write)) { + hyper_waker_wake(conn->write_waker); + conn->write_waker = NULL; + } + } + + } + + return 0; + +fail: + if (err) { + printf("error code: %d\n", hyper_error_code(err)); + // grab the error details + char errbuf [256]; + size_t errlen = hyper_error_print(err, errbuf, sizeof(errbuf)); + printf("details: %.*s\n", (int) errlen, errbuf); + + // clean up the error + hyper_error_free(err); + } + return 1; +} diff --git a/capi/examples/upload.c b/capi/examples/upload.c new file mode 100644 index 0000000000..ed6f37a709 --- /dev/null +++ b/capi/examples/upload.c @@ -0,0 +1,386 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "hyper.h" + + +struct conn_data { + int fd; + hyper_waker *read_waker; + hyper_waker *write_waker; +}; + +static size_t read_cb(void *userdata, hyper_context *ctx, uint8_t *buf, size_t buf_len) { + struct conn_data *conn = (struct conn_data *)userdata; + ssize_t ret = read(conn->fd, buf, buf_len); + + if (ret < 0) { + int err = errno; + if (err == EAGAIN) { + // would block, register interest + if (conn->read_waker != NULL) { + hyper_waker_free(conn->read_waker); + } + conn->read_waker = hyper_context_waker(ctx); + return HYPER_IO_PENDING; + } else { + // kaboom + return HYPER_IO_ERROR; + } + } else { + return ret; + } +} + +static size_t write_cb(void *userdata, hyper_context *ctx, const uint8_t *buf, size_t buf_len) { + struct conn_data *conn = (struct conn_data *)userdata; + ssize_t ret = write(conn->fd, buf, buf_len); + + if (ret < 0) { + int err = errno; + if (err == EAGAIN) { + // would block, register interest + if (conn->write_waker != NULL) { + hyper_waker_free(conn->write_waker); + } + conn->write_waker = hyper_context_waker(ctx); + return HYPER_IO_PENDING; + } else { + // kaboom + return HYPER_IO_ERROR; + } + } else { + return ret; + } +} + +static void free_conn_data(struct conn_data *conn) { + if (conn->read_waker) { + hyper_waker_free(conn->read_waker); + conn->read_waker = NULL; + } + if (conn->write_waker) { + hyper_waker_free(conn->write_waker); + conn->write_waker = NULL; + } + + free(conn); +} + +static int connect_to(const char *host, const char *port) { + struct addrinfo hints; + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + + struct addrinfo *result, *rp; + if (getaddrinfo(host, port, &hints, &result) != 0) { + printf("dns failed for %s\n", host); + return -1; + } + + int sfd; + for (rp = result; rp != NULL; rp = rp->ai_next) { + sfd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); + if (sfd == -1) { + continue; + } + + if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1) { + break; + } else { + close(sfd); + } + } + + freeaddrinfo(result); + + // no address succeeded + if (rp == NULL) { + printf("connect failed for %s\n", host); + return -1; + } + + return sfd; +} + +struct upload_body { + int fd; + char *buf; + size_t len; +}; + +static int poll_req_upload(void *userdata, + hyper_context *ctx, + hyper_buf 
**chunk) { + struct upload_body* upload = userdata; + + ssize_t res = read(upload->fd, upload->buf, upload->len); + if (res < 0) { + printf("error reading upload file: %d", errno); + return HYPER_POLL_ERROR; + } else if (res == 0) { + // All done! + *chunk = NULL; + return HYPER_POLL_READY; + } else { + *chunk = hyper_buf_copy(upload->buf, res); + return HYPER_POLL_READY; + } +} + +static int print_each_header(void *userdata, + const uint8_t *name, + size_t name_len, + const uint8_t *value, + size_t value_len) { + printf("%.*s: %.*s\n", (int) name_len, name, (int) value_len, value); + return HYPER_ITER_CONTINUE; +} + +typedef enum { + EXAMPLE_NOT_SET = 0, // tasks we don't know about won't have a userdata set + EXAMPLE_HANDSHAKE, + EXAMPLE_SEND, + EXAMPLE_RESP_BODY +} example_id; + +#define STR_ARG(XX) (uint8_t *)XX, strlen(XX) + +int main(int argc, char *argv[]) { + const char *file = argc > 1 ? argv[1] : NULL; + const char *host = argc > 2 ? argv[2] : "httpbin.org"; + const char *port = argc > 3 ? argv[3] : "80"; + const char *path = argc > 4 ? argv[4] : "/post"; + + if (!file) { + printf("Pass a file path as the first argument.\n"); + return 1; + } + + struct upload_body upload; + upload.fd = open(file, O_RDONLY); + + if (upload.fd < 0) { + printf("error opening file to upload: %d", errno); + return 1; + } + printf("connecting to port %s on %s...\n", port, host); + + int fd = connect_to(host, port); + if (fd < 0) { + return 1; + } + printf("connected to %s, now upload to %s\n", host, path); + + if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) { + printf("failed to set socket to non-blocking\n"); + return 1; + } + + upload.len = 8192; + upload.buf = malloc(upload.len); + + fd_set fds_read; + fd_set fds_write; + fd_set fds_excep; + + struct conn_data *conn = malloc(sizeof(struct conn_data)); + + conn->fd = fd; + conn->read_waker = NULL; + conn->write_waker = NULL; + + + // Hookup the IO + hyper_io *io = hyper_io_new(); + hyper_io_set_userdata(io, (void *)conn); + hyper_io_set_read(io, read_cb); + hyper_io_set_write(io, write_cb); + + printf("http handshake ...\n"); + + // We need an executor generally to poll futures + const hyper_executor *exec = hyper_executor_new(); + + // Prepare client options + hyper_clientconn_options *opts = hyper_clientconn_options_new(); + hyper_clientconn_options_exec(opts, exec); + + hyper_task *handshake = hyper_clientconn_handshake(io, opts); + hyper_task_set_userdata(handshake, (void *)EXAMPLE_HANDSHAKE); + + // Let's wait for the handshake to finish... + hyper_executor_push(exec, handshake); + + // This body will get filled in eventually... + hyper_body *resp_body = NULL; + + // The polling state machine! + while (1) { + // Poll all ready tasks and act on them... 
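+        //
+        // This mirrors the GET example's loop, except the response body is
+        // pulled one chunk at a time with hyper_body_data(), pushing a fresh
+        // task after each received chunk.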
+ while (1) { + hyper_task *task = hyper_executor_poll(exec); + if (!task) { + break; + } + hyper_task_return_type task_type = hyper_task_type(task); + + switch ((example_id) hyper_task_userdata(task)) { + case EXAMPLE_HANDSHAKE: + ; + if (task_type == HYPER_TASK_ERROR) { + printf("handshake error!\n"); + return 1; + } + assert(task_type == HYPER_TASK_CLIENTCONN); + + printf("preparing http request ...\n"); + + hyper_clientconn *client = hyper_task_value(task); + hyper_task_free(task); + + // Prepare the request + hyper_request *req = hyper_request_new(); + if (hyper_request_set_method(req, STR_ARG("POST"))) { + printf("error setting method\n"); + return 1; + } + if (hyper_request_set_uri(req, STR_ARG(path))) { + printf("error setting uri\n"); + return 1; + } + + hyper_headers *req_headers = hyper_request_headers(req); + hyper_headers_set(req_headers, STR_ARG("host"), STR_ARG(host)); + + // Prepare the req body + hyper_body *body = hyper_body_new(); + hyper_body_set_userdata(body, &upload); + hyper_body_set_data_func(body, poll_req_upload); + hyper_request_set_body(req, body); + + // Send it! + hyper_task *send = hyper_clientconn_send(client, req); + hyper_task_set_userdata(send, (void *)EXAMPLE_SEND); + printf("sending ...\n"); + hyper_executor_push(exec, send); + + // For this example, no longer need the client + hyper_clientconn_free(client); + + break; + case EXAMPLE_SEND: + ; + if (task_type == HYPER_TASK_ERROR) { + printf("send error!\n"); + return 1; + } + assert(task_type == HYPER_TASK_RESPONSE); + + // Take the results + hyper_response *resp = hyper_task_value(task); + hyper_task_free(task); + + uint16_t http_status = hyper_response_status(resp); + + printf("\nResponse Status: %d\n", http_status); + + hyper_headers *headers = hyper_response_headers(resp); + hyper_headers_foreach(headers, print_each_header, NULL); + printf("\n"); + + resp_body = hyper_response_body(resp); + + // Set us up to peel data from the body a chunk at a time + hyper_task *body_data = hyper_body_data(resp_body); + hyper_task_set_userdata(body_data, (void *)EXAMPLE_RESP_BODY); + hyper_executor_push(exec, body_data); + + // No longer need the response + hyper_response_free(resp); + + break; + case EXAMPLE_RESP_BODY: + ; + if (task_type == HYPER_TASK_ERROR) { + printf("body error!\n"); + return 1; + } + + if (task_type == HYPER_TASK_BUF) { + hyper_buf *chunk = hyper_task_value(task); + write(1, hyper_buf_bytes(chunk), hyper_buf_len(chunk)); + hyper_buf_free(chunk); + hyper_task_free(task); + + hyper_task *body_data = hyper_body_data(resp_body); + hyper_task_set_userdata(body_data, (void *)EXAMPLE_RESP_BODY); + hyper_executor_push(exec, body_data); + + break; + } else { + assert(task_type == HYPER_TASK_EMPTY); + hyper_task_free(task); + hyper_body_free(resp_body); + + printf("\n -- Done! -- \n"); + + // Cleaning up before exiting + hyper_executor_free(exec); + free_conn_data(conn); + free(upload.buf); + + return 0; + } + case EXAMPLE_NOT_SET: + // A background task for hyper completed... + hyper_task_free(task); + break; + } + } + + // All futures are pending on IO work, so select on the fds. 
+ + FD_ZERO(&fds_read); + FD_ZERO(&fds_write); + FD_ZERO(&fds_excep); + + if (conn->read_waker) { + FD_SET(conn->fd, &fds_read); + } + if (conn->write_waker) { + FD_SET(conn->fd, &fds_write); + } + + int sel_ret = select(conn->fd + 1, &fds_read, &fds_write, &fds_excep, NULL); + + if (sel_ret < 0) { + printf("select() error\n"); + return 1; + } else { + if (FD_ISSET(conn->fd, &fds_read)) { + hyper_waker_wake(conn->read_waker); + conn->read_waker = NULL; + } + if (FD_ISSET(conn->fd, &fds_write)) { + hyper_waker_wake(conn->write_waker); + conn->write_waker = NULL; + } + } + + } + + + return 0; +} diff --git a/capi/gen_header.sh b/capi/gen_header.sh new file mode 100755 index 0000000000..4cd1a26c23 --- /dev/null +++ b/capi/gen_header.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +CAPI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +WORK_DIR=`mktemp -d` + + +# check if tmp dir was created +if [[ ! "$WORK_DIR" || ! -d "$WORK_DIR" ]]; then + echo "Could not create temp dir" + exit 1 +fi + +header_file_backup="$CAPI_DIR/include/hyper.h.backup" + +function cleanup { + #echo "$WORK_DIR" + rm -rf "$WORK_DIR" + rm "$header_file_backup" +} + +trap cleanup EXIT + +mkdir "$WORK_DIR/src" + +# Fake a library +cat > "$WORK_DIR/src/lib.rs" << EOF +#[path = "$CAPI_DIR/../src/ffi/mod.rs"] +pub mod ffi; +EOF + +# And its Cargo.toml +cat > "$WORK_DIR/Cargo.toml" << EOF +[package] +name = "hyper" +version = "0.0.0" +edition = "2018" +publish = false + +[dependencies] +EOF + +cp "$CAPI_DIR/include/hyper.h" "$header_file_backup" + +#cargo metadata --no-default-features --features ffi --format-version 1 > "$WORK_DIR/metadata.json" + +cd $WORK_DIR + +# Expand just the ffi module +cargo rustc -- -Z unstable-options --pretty=expanded > expanded.rs 2>/dev/null + +# Replace the previous copy with the single expanded file +rm -rf ./src +mkdir src +mv expanded.rs src/lib.rs + + +# Bindgen! +cbindgen\ + -c "$CAPI_DIR/cbindgen.toml"\ + --lockfile "$CAPI_DIR/../Cargo.lock"\ + -o "$CAPI_DIR/include/hyper.h"\ + $1 + +bindgen_exit_code=$? + +if [[ "--verify" == "$1" && "$bindgen_exit_code" != 0 ]]; then + echo "diff generated (<) vs backup (>)" + diff "$CAPI_DIR/include/hyper.h" "$header_file_backup" +fi + +exit $bindgen_exit_code diff --git a/capi/include/hyper.h b/capi/include/hyper.h new file mode 100644 index 0000000000..f2a6f8dbfb --- /dev/null +++ b/capi/include/hyper.h @@ -0,0 +1,554 @@ +#ifndef _HYPER_H +#define _HYPER_H + +#include +#include + +#define HYPER_ITER_CONTINUE 0 + +#define HYPER_ITER_BREAK 1 + +#define HYPER_HTTP_VERSION_NONE 0 + +#define HYPER_HTTP_VERSION_1_0 10 + +#define HYPER_HTTP_VERSION_1_1 11 + +#define HYPER_HTTP_VERSION_2 20 + +#define HYPER_IO_PENDING 4294967295 + +#define HYPER_IO_ERROR 4294967294 + +#define HYPER_POLL_READY 0 + +#define HYPER_POLL_PENDING 1 + +#define HYPER_POLL_ERROR 3 + +typedef enum { + /* + All is well. + */ + HYPERE_OK, + /* + General error, details in the `hyper_error *`. + */ + HYPERE_ERROR, + /* + A function argument was invalid. + */ + HYPERE_INVALID_ARG, + /* + The IO transport returned an EOF when one wasn't expected. + + This typically means an HTTP request or response was expected, but the + connection closed cleanly without sending (all of) it. + */ + HYPERE_UNEXPECTED_EOF, + /* + Aborted by a user supplied callback. + */ + HYPERE_ABORTED_BY_CALLBACK, + /* + An optional hyper feature was not enabled. + */ + HYPERE_FEATURE_NOT_ENABLED, +} hyper_code; + +typedef enum { + /* + The value of this task is null (does not imply an error). 
+ */ + HYPER_TASK_EMPTY, + /* + The value of this task is `hyper_error *`. + */ + HYPER_TASK_ERROR, + /* + The value of this task is `hyper_clientconn *`. + */ + HYPER_TASK_CLIENTCONN, + /* + The value of this task is `hyper_response *`. + */ + HYPER_TASK_RESPONSE, + /* + The value of this task is `hyper_buf *`. + */ + HYPER_TASK_BUF, +} hyper_task_return_type; + +typedef struct hyper_executor hyper_executor; + +typedef struct hyper_io hyper_io; + +typedef struct hyper_task hyper_task; + +typedef struct hyper_body hyper_body; + +typedef struct hyper_buf hyper_buf; + +typedef struct hyper_clientconn hyper_clientconn; + +typedef struct hyper_clientconn_options hyper_clientconn_options; + +typedef struct hyper_context hyper_context; + +typedef struct hyper_error hyper_error; + +typedef struct hyper_headers hyper_headers; + +typedef struct hyper_request hyper_request; + +typedef struct hyper_response hyper_response; + +typedef struct hyper_waker hyper_waker; + +typedef int (*hyper_body_foreach_callback)(void*, const hyper_buf*); + +typedef int (*hyper_body_data_callback)(void*, hyper_context*, hyper_buf**); + +typedef int (*hyper_headers_foreach_callback)(void*, const uint8_t*, size_t, const uint8_t*, size_t); + +typedef size_t (*hyper_io_read_callback)(void*, hyper_context*, uint8_t*, size_t); + +typedef size_t (*hyper_io_write_callback)(void*, hyper_context*, const uint8_t*, size_t); + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/* + Returns a static ASCII (null terminated) string of the hyper version. + */ +const char *hyper_version(void); + +/* + Create a new "empty" body. + + If not configured, this body acts as an empty payload. + */ +hyper_body *hyper_body_new(void); + +/* + Free a `hyper_body *`. + */ +void hyper_body_free(hyper_body *body); + +/* + Return a task that will poll the body for the next buffer of data. + + The task value may have different types depending on the outcome: + + - `HYPER_TASK_BUF`: Success, and more data was received. + - `HYPER_TASK_ERROR`: An error retrieving the data. + - `HYPER_TASK_EMPTY`: The body has finished streaming data. + + This does not consume the `hyper_body *`, so it may be used to again. + However, it MUST NOT be used or freed until the related task completes. + */ +hyper_task *hyper_body_data(hyper_body *body); + +/* + Return a task that will poll the body and execute the callback with each + body chunk that is received. + + The `hyper_buf` pointer is only a borrowed reference, it cannot live outside + the execution of the callback. You must make a copy to retain it. + + The callback should return `HYPER_ITER_CONTINUE` to continue iterating + chunks as they are received, or `HYPER_ITER_BREAK` to cancel. + + This will consume the `hyper_body *`, you shouldn't use it anymore or free it. + */ +hyper_task *hyper_body_foreach(hyper_body *body, hyper_body_foreach_callback func, void *userdata); + +/* + Set userdata on this body, which will be passed to callback functions. + */ +void hyper_body_set_userdata(hyper_body *body, void *userdata); + +/* + Set the data callback for this body. + + The callback is called each time hyper needs to send more data for the + body. It is passed the value from `hyper_body_set_userdata`. + + If there is data available, the `hyper_buf **` argument should be set + to a `hyper_buf *` containing the data, and `HYPER_POLL_READY` should + be returned. + + Returning `HYPER_POLL_READY` while the `hyper_buf **` argument points + to `NULL` will indicate the body has completed all data. 
+ + If there is more data to send, but it isn't yet available, a + `hyper_waker` should be saved from the `hyper_context *` argument, and + `HYPER_POLL_PENDING` should be returned. You must wake the saved waker + to signal the task when data is available. + + If some error has occurred, you can return `HYPER_POLL_ERROR` to abort + the body. + */ +void hyper_body_set_data_func(hyper_body *body, hyper_body_data_callback func); + +/* + Create a new `hyper_buf *` by copying the provided bytes. + + This makes an owned copy of the bytes, so the `buf` argument can be + freed or changed afterwards. + */ +hyper_buf *hyper_buf_copy(const uint8_t *buf, size_t len); + +/* + Get a pointer to the bytes in this buffer. + + This should be used in conjunction with `hyper_buf_len` to get the length + of the bytes data. + + This pointer is borrowed data, and not valid once the `hyper_buf` is + consumed/freed. + */ +const uint8_t *hyper_buf_bytes(const hyper_buf *buf); + +/* + Get the length of the bytes this buffer contains. + */ +size_t hyper_buf_len(const hyper_buf *buf); + +/* + Free this buffer. + */ +void hyper_buf_free(hyper_buf *buf); + +/* + Starts an HTTP client connection handshake using the provided IO transport + and options. + + Both the `io` and the `options` are consumed in this function call. + + The returned `hyper_task *` must be polled with an executor until the + handshake completes, at which point the value can be taken. + */ +hyper_task *hyper_clientconn_handshake(hyper_io *io, hyper_clientconn_options *options); + +/* + Send a request on the client connection. + + Returns a task that needs to be polled until it is ready. When ready, the + task yields a `hyper_response *`. + */ +hyper_task *hyper_clientconn_send(hyper_clientconn *conn, hyper_request *req); + +/* + Free a `hyper_clientconn *`. + */ +void hyper_clientconn_free(hyper_clientconn *conn); + +/* + Creates a new set of HTTP clientconn options to be used in a handshake. + */ +hyper_clientconn_options *hyper_clientconn_options_new(void); + +/* + Free a `hyper_clientconn_options *`. + */ +void hyper_clientconn_options_free(hyper_clientconn_options *opts); + +/* + Set the client background task executor. + + This does not consume the `options` or the `exec`. + */ +void hyper_clientconn_options_exec(hyper_clientconn_options *opts, const hyper_executor *exec); + +/* + Set the whether to use HTTP2. + + Pass `0` to disable, `1` to enable. + */ +hyper_code hyper_clientconn_options_http2(hyper_clientconn_options *opts, int enabled); + +/* + Frees a `hyper_error`. + */ +void hyper_error_free(hyper_error *err); + +/* + Get an equivalent `hyper_code` from this error. + */ +hyper_code hyper_error_code(const hyper_error *err); + +/* + Print the details of this error to a buffer. + + The `dst_len` value must be the maximum length that the buffer can + store. + + The return value is number of bytes that were written to `dst`. + */ +size_t hyper_error_print(const hyper_error *err, uint8_t *dst, size_t dst_len); + +/* + Construct a new HTTP request. + */ +hyper_request *hyper_request_new(void); + +/* + Free an HTTP request if not going to send it on a client. + */ +void hyper_request_free(hyper_request *req); + +/* + Set the HTTP Method of the request. + */ +hyper_code hyper_request_set_method(hyper_request *req, const uint8_t *method, size_t method_len); + +/* + Set the URI of the request. + */ +hyper_code hyper_request_set_uri(hyper_request *req, const uint8_t *uri, size_t uri_len); + +/* + Set the preferred HTTP version of the request. 
+ + The version value should be one of the `HYPER_HTTP_VERSION_` constants. + + Note that this won't change the major HTTP version of the connection, + since that is determined at the handshake step. + */ +hyper_code hyper_request_set_version(hyper_request *req, int version); + +/* + Gets a reference to the HTTP headers of this request + + This is not an owned reference, so it should not be accessed after the + `hyper_request` has been consumed. + */ +hyper_headers *hyper_request_headers(hyper_request *req); + +/* + Set the body of the request. + + The default is an empty body. + + This takes ownership of the `hyper_body *`, you must not use it or + free it after setting it on the request. + */ +hyper_code hyper_request_set_body(hyper_request *req, hyper_body *body); + +/* + Free an HTTP response after using it. + */ +void hyper_response_free(hyper_response *resp); + +/* + Get the HTTP-Status code of this response. + + It will always be within the range of 100-599. + */ +uint16_t hyper_response_status(const hyper_response *resp); + +/* + Get the HTTP version used by this response. + + The returned value could be: + + - `HYPER_HTTP_VERSION_1_0` + - `HYPER_HTTP_VERSION_1_1` + - `HYPER_HTTP_VERSION_2` + - `HYPER_HTTP_VERSION_NONE` if newer (or older). + */ +int hyper_response_version(const hyper_response *resp); + +/* + Gets a reference to the HTTP headers of this response. + + This is not an owned reference, so it should not be accessed after the + `hyper_response` has been freed. + */ +hyper_headers *hyper_response_headers(hyper_response *resp); + +/* + Take ownership of the body of this response. + + It is safe to free the response even after taking ownership of its body. + */ +hyper_body *hyper_response_body(hyper_response *resp); + +/* + Iterates the headers passing each name and value pair to the callback. + + The `userdata` pointer is also passed to the callback. + + The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or + `HYPER_ITER_BREAK` to stop. + */ +void hyper_headers_foreach(const hyper_headers *headers, + hyper_headers_foreach_callback func, + void *userdata); + +/* + Sets the header with the provided name to the provided value. + + This overwrites any previous value set for the header. + */ +hyper_code hyper_headers_set(hyper_headers *headers, + const uint8_t *name, + size_t name_len, + const uint8_t *value, + size_t value_len); + +/* + Adds the provided value to the list of the provided name. + + If there were already existing values for the name, this will append the + new value to the internal list. + */ +hyper_code hyper_headers_add(hyper_headers *headers, + const uint8_t *name, + size_t name_len, + const uint8_t *value, + size_t value_len); + +/* + Create a new IO type used to represent a transport. + + The read and write functions of this transport should be set with + `hyper_io_set_read` and `hyper_io_set_write`. + */ +hyper_io *hyper_io_new(void); + +/* + Free an unused `hyper_io *`. + + This is typically only useful if you aren't going to pass ownership + of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. + */ +void hyper_io_free(hyper_io *io); + +/* + Set the user data pointer for this IO to some value. + + This value is passed as an argument to the read and write callbacks. + */ +void hyper_io_set_userdata(hyper_io *io, void *data); + +/* + Set the read function for this IO transport. + + Data that is read from the transport should be put in the `buf` pointer, + up to `buf_len` bytes. 
The number of bytes read should be the return value.
+
+ It is undefined behavior to try to access the bytes in the `buf` pointer,
+ unless you have already written them yourself. It is also undefined behavior
+ to return that more bytes have been written than actually set on the `buf`.
+
+ If there is no data currently available, a waker should be claimed from
+ the `ctx` and registered with whatever polling mechanism is used to signal
+ when data is available later on. The return value should be
+ `HYPER_IO_PENDING`.
+
+ If there is an irrecoverable error reading data, then `HYPER_IO_ERROR`
+ should be the return value.
+ */
+void hyper_io_set_read(hyper_io *io, hyper_io_read_callback func);
+
+/*
+ Set the write function for this IO transport.
+
+ Data from the `buf` pointer should be written to the transport, up to
+ `buf_len` bytes. The number of bytes written should be the return value.
+
+ If no data can currently be written, the `waker` should be cloned and
+ registered with whatever polling mechanism is used to signal when data
+ is available later on. The return value should be `HYPER_IO_PENDING`.
+
+ If there is an irrecoverable error writing data, then `HYPER_IO_ERROR`
+ should be the return value.
+ */
+void hyper_io_set_write(hyper_io *io, hyper_io_write_callback func);
+
+/*
+ Creates a new task executor.
+ */
+const hyper_executor *hyper_executor_new(void);
+
+/*
+ Frees an executor and any incomplete tasks still part of it.
+ */
+void hyper_executor_free(const hyper_executor *exec);
+
+/*
+ Push a task onto the executor.
+
+ The executor takes ownership of the task, it should not be accessed
+ again unless returned back to the user with `hyper_executor_poll`.
+ */
+hyper_code hyper_executor_push(const hyper_executor *exec, hyper_task *task);
+
+/*
+ Polls the executor, trying to make progress on any tasks that have notified
+ that they are ready again.
+
+ If ready, returns a task from the executor that has completed.
+
+ If there are no ready tasks, this returns `NULL`.
+ */
+hyper_task *hyper_executor_poll(const hyper_executor *exec);
+
+/*
+ Free a task.
+ */
+void hyper_task_free(hyper_task *task);
+
+/*
+ Takes the output value of this task.
+
+ This must only be called once polling the task on an executor has finished
+ this task.
+
+ Use `hyper_task_type` to determine the type of the `void *` return value.
+ */
+void *hyper_task_value(hyper_task *task);
+
+/*
+ Query the return type of this task.
+ */
+hyper_task_return_type hyper_task_type(hyper_task *task);
+
+/*
+ Set a user data pointer to be associated with this task.
+
+ This value will be passed to task callbacks, and can be checked later
+ with `hyper_task_userdata`.
+ */
+void hyper_task_set_userdata(hyper_task *task, void *userdata);
+
+/*
+ Retrieve the userdata that has been set via `hyper_task_set_userdata`.
+ */
+void *hyper_task_userdata(hyper_task *task);
+
+/*
+ Copies a waker out of the task context.
+ */
+hyper_waker *hyper_context_waker(hyper_context *cx);
+
+/*
+ Free a waker that hasn't been woken.
+ */
+void hyper_waker_free(hyper_waker *waker);
+
+/*
+ Wake up the task associated with the waker.
+ */ +void hyper_waker_wake(hyper_waker *waker); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif /* _HYPER_H */ diff --git a/src/body/body.rs b/src/body/body.rs index 4a1d6210bc..e50e9f123e 100644 --- a/src/body/body.rs +++ b/src/body/body.rs @@ -51,6 +51,8 @@ enum Kind { content_length: DecodedLength, recv: h2::RecvStream, }, + #[cfg(feature = "ffi")] + Ffi(crate::ffi::UserBody), #[cfg(feature = "stream")] Wrapped( SyncWrapper< @@ -260,6 +262,21 @@ impl Body { } } + #[cfg(feature = "ffi")] + pub(crate) fn as_ffi_mut(&mut self) -> &mut crate::ffi::UserBody { + match self.kind { + Kind::Ffi(ref mut body) => return body, + _ => { + self.kind = Kind::Ffi(crate::ffi::UserBody::new()); + } + } + + match self.kind { + Kind::Ffi(ref mut body) => body, + _ => unreachable!(), + } + } + fn poll_inner(&mut self, cx: &mut task::Context<'_>) -> Poll>> { match self.kind { Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)), @@ -294,6 +311,9 @@ impl Body { None => Poll::Ready(None), }, + #[cfg(feature = "ffi")] + Kind::Ffi(ref mut body) => body.poll_data(cx), + #[cfg(feature = "stream")] Kind::Wrapped(ref mut s) => match ready!(s.get_mut().as_mut().poll_next(cx)) { Some(res) => Poll::Ready(Some(res.map_err(crate::Error::new_body))), @@ -348,6 +368,10 @@ impl HttpBody for Body { } Err(e) => Poll::Ready(Err(crate::Error::new_h2(e))), }, + + #[cfg(feature = "ffi")] + Kind::Ffi(ref mut body) => body.poll_trailers(cx), + _ => Poll::Ready(Ok(None)), } } @@ -358,6 +382,8 @@ impl HttpBody for Body { Kind::Chan { content_length, .. } => content_length == DecodedLength::ZERO, #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(), + #[cfg(feature = "ffi")] + Kind::Ffi(..) => false, #[cfg(feature = "stream")] Kind::Wrapped(..) => false, } @@ -384,6 +410,8 @@ impl HttpBody for Body { Kind::Chan { content_length, .. } => opt_len!(content_length), #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] Kind::H2 { content_length, .. } => opt_len!(content_length), + #[cfg(feature = "ffi")] + Kind::Ffi(..) => SizeHint::default(), } } } diff --git a/src/error.rs b/src/error.rs index 16bacd163e..68b042f0c8 100644 --- a/src/error.rs +++ b/src/error.rs @@ -116,6 +116,10 @@ pub(crate) enum User { /// User polled for an upgrade, but low-level API is not using upgrades. #[cfg(feature = "http1")] ManualUpgrade, + + /// User aborted in an FFI callback. + #[cfg(feature = "ffi")] + AbortedByCallback, } // Sentinel type to indicate the error was caused by a timeout. 
@@ -179,8 +183,7 @@ impl Error { self } - #[cfg(feature = "http1")] - #[cfg(feature = "server")] + #[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))] pub(crate) fn kind(&self) -> &Kind { &self.inner.kind } @@ -336,6 +339,11 @@ impl Error { Error::new(Kind::Shutdown).with(cause) } + #[cfg(feature = "ffi")] + pub(crate) fn new_user_aborted_by_callback() -> Error { + Error::new_user(User::AbortedByCallback) + } + #[cfg(feature = "http2")] pub(crate) fn new_h2(cause: ::h2::Error) -> Error { if cause.is_io() { @@ -406,6 +414,8 @@ impl Error { Kind::User(User::NoUpgrade) => "no upgrade available", #[cfg(feature = "http1")] Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use", + #[cfg(feature = "ffi")] + Kind::User(User::AbortedByCallback) => "operation aborted by an application callback", } } } diff --git a/src/ffi/body.rs b/src/ffi/body.rs new file mode 100644 index 0000000000..1c8f1a48c0 --- /dev/null +++ b/src/ffi/body.rs @@ -0,0 +1,233 @@ +use std::ffi::c_void; +use std::mem::ManuallyDrop; +use std::ptr; +use std::task::{Context, Poll}; + +use http::HeaderMap; +use libc::{c_int, size_t}; + +use super::task::{hyper_context, hyper_task_return_type, AsTaskType, Task}; +use super::{UserDataPointer, HYPER_ITER_CONTINUE}; +use crate::body::{Body, Bytes, HttpBody as _}; + +pub struct hyper_body(pub(super) Body); + +pub struct hyper_buf(pub(super) Bytes); + +pub(crate) struct UserBody { + data_func: hyper_body_data_callback, + userdata: *mut c_void, +} + +// ===== Body ===== + +type hyper_body_foreach_callback = extern "C" fn(*mut c_void, *const hyper_buf) -> c_int; + +type hyper_body_data_callback = + extern "C" fn(*mut c_void, *mut hyper_context, *mut *mut hyper_buf) -> c_int; + +ffi_fn! { + /// Create a new "empty" body. + /// + /// If not configured, this body acts as an empty payload. + fn hyper_body_new() -> *mut hyper_body { + Box::into_raw(Box::new(hyper_body(Body::empty()))) + } +} + +ffi_fn! { + /// Free a `hyper_body *`. + fn hyper_body_free(body: *mut hyper_body) { + if body.is_null() { + return; + } + + drop(unsafe { Box::from_raw(body) }); + } +} + +ffi_fn! { + /// Return a task that will poll the body for the next buffer of data. + /// + /// The task value may have different types depending on the outcome: + /// + /// - `HYPER_TASK_BUF`: Success, and more data was received. + /// - `HYPER_TASK_ERROR`: An error retrieving the data. + /// - `HYPER_TASK_EMPTY`: The body has finished streaming data. + /// + /// This does not consume the `hyper_body *`, so it may be used to again. + /// However, it MUST NOT be used or freed until the related task completes. + fn hyper_body_data(body: *mut hyper_body) -> *mut Task { + // This doesn't take ownership of the Body, so don't allow destructor + let mut body = ManuallyDrop::new(unsafe { Box::from_raw(body) }); + + Box::into_raw(Task::boxed(async move { + body.0.data().await.map(|res| res.map(hyper_buf)) + })) + } +} + +ffi_fn! { + /// Return a task that will poll the body and execute the callback with each + /// body chunk that is received. + /// + /// The `hyper_buf` pointer is only a borrowed reference, it cannot live outside + /// the execution of the callback. You must make a copy to retain it. + /// + /// The callback should return `HYPER_ITER_CONTINUE` to continue iterating + /// chunks as they are received, or `HYPER_ITER_BREAK` to cancel. + /// + /// This will consume the `hyper_body *`, you shouldn't use it anymore or free it. 
+ fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut Task { + if body.is_null() { + return ptr::null_mut(); + } + + let mut body = unsafe { Box::from_raw(body) }; + let userdata = UserDataPointer(userdata); + + Box::into_raw(Task::boxed(async move { + while let Some(item) = body.0.data().await { + let chunk = item?; + if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) { + return Err(crate::Error::new_user_aborted_by_callback()); + } + } + Ok(()) + })) + } +} + +ffi_fn! { + /// Set userdata on this body, which will be passed to callback functions. + fn hyper_body_set_userdata(body: *mut hyper_body, userdata: *mut c_void) { + let b = unsafe { &mut *body }; + b.0.as_ffi_mut().userdata = userdata; + } +} + +ffi_fn! { + /// Set the data callback for this body. + /// + /// The callback is called each time hyper needs to send more data for the + /// body. It is passed the value from `hyper_body_set_userdata`. + /// + /// If there is data available, the `hyper_buf **` argument should be set + /// to a `hyper_buf *` containing the data, and `HYPER_POLL_READY` should + /// be returned. + /// + /// Returning `HYPER_POLL_READY` while the `hyper_buf **` argument points + /// to `NULL` will indicate the body has completed all data. + /// + /// If there is more data to send, but it isn't yet available, a + /// `hyper_waker` should be saved from the `hyper_context *` argument, and + /// `HYPER_POLL_PENDING` should be returned. You must wake the saved waker + /// to signal the task when data is available. + /// + /// If some error has occurred, you can return `HYPER_POLL_ERROR` to abort + /// the body. + fn hyper_body_set_data_func(body: *mut hyper_body, func: hyper_body_data_callback) { + let b = unsafe { &mut *body }; + b.0.as_ffi_mut().data_func = func; + } +} + +// ===== impl UserBody ===== + +impl UserBody { + pub(crate) fn new() -> UserBody { + UserBody { + data_func: data_noop, + userdata: std::ptr::null_mut(), + } + } + + pub(crate) fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { + let mut out = std::ptr::null_mut(); + match (self.data_func)(self.userdata, hyper_context::wrap(cx), &mut out) { + super::task::HYPER_POLL_READY => { + if out.is_null() { + Poll::Ready(None) + } else { + let buf = unsafe { Box::from_raw(out) }; + Poll::Ready(Some(Ok(buf.0))) + } + } + super::task::HYPER_POLL_PENDING => Poll::Pending, + super::task::HYPER_POLL_ERROR => { + Poll::Ready(Some(Err(crate::Error::new_body_write_aborted()))) + } + unexpected => Poll::Ready(Some(Err(crate::Error::new_body_write(format!( + "unexpected hyper_body_data_func return code {}", + unexpected + ))))), + } + } + + pub(crate) fn poll_trailers( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll>> { + Poll::Ready(Ok(None)) + } +} + +/// cbindgen:ignore +extern "C" fn data_noop( + _userdata: *mut c_void, + _: *mut hyper_context<'_>, + _: *mut *mut hyper_buf, +) -> c_int { + super::task::HYPER_POLL_READY +} + +unsafe impl Send for UserBody {} +unsafe impl Sync for UserBody {} + +// ===== Bytes ===== + +ffi_fn! { + /// Create a new `hyper_buf *` by copying the provided bytes. + /// + /// This makes an owned copy of the bytes, so the `buf` argument can be + /// freed or changed afterwards. + fn hyper_buf_copy(buf: *const u8, len: size_t) -> *mut hyper_buf { + let slice = unsafe { + std::slice::from_raw_parts(buf, len) + }; + Box::into_raw(Box::new(hyper_buf(Bytes::copy_from_slice(slice)))) + } +} + +ffi_fn! { + /// Get a pointer to the bytes in this buffer. 
+ /// + /// This should be used in conjunction with `hyper_buf_len` to get the length + /// of the bytes data. + /// + /// This pointer is borrowed data, and not valid once the `hyper_buf` is + /// consumed/freed. + fn hyper_buf_bytes(buf: *const hyper_buf) -> *const u8 { + unsafe { (*buf).0.as_ptr() } + } +} + +ffi_fn! { + /// Get the length of the bytes this buffer contains. + fn hyper_buf_len(buf: *const hyper_buf) -> size_t { + unsafe { (*buf).0.len() } + } +} + +ffi_fn! { + /// Free this buffer. + fn hyper_buf_free(buf: *mut hyper_buf) { + drop(unsafe { Box::from_raw(buf) }); + } +} + +unsafe impl AsTaskType for hyper_buf { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_BUF + } +} diff --git a/src/ffi/client.rs b/src/ffi/client.rs new file mode 100644 index 0000000000..2c2ef6b262 --- /dev/null +++ b/src/ffi/client.rs @@ -0,0 +1,148 @@ +use std::sync::Arc; + +use libc::c_int; + +use crate::client::conn; +use crate::rt::Executor as _; + +use super::error::hyper_code; +use super::http_types::{hyper_request, hyper_response}; +use super::io::Io; +use super::task::{hyper_task_return_type, AsTaskType, Exec, Task, WeakExec}; + +pub struct hyper_clientconn_options { + builder: conn::Builder, + /// Use a `Weak` to prevent cycles. + exec: WeakExec, +} + +pub struct hyper_clientconn { + tx: conn::SendRequest, +} + +// ===== impl hyper_clientconn ===== + +ffi_fn! { + /// Starts an HTTP client connection handshake using the provided IO transport + /// and options. + /// + /// Both the `io` and the `options` are consumed in this function call. + /// + /// The returned `hyper_task *` must be polled with an executor until the + /// handshake completes, at which point the value can be taken. + fn hyper_clientconn_handshake(io: *mut Io, options: *mut hyper_clientconn_options) -> *mut Task { + if io.is_null() { + return std::ptr::null_mut(); + } + if options.is_null() { + return std::ptr::null_mut(); + } + + let options = unsafe { Box::from_raw(options) }; + let io = unsafe { Box::from_raw(io) }; + + Box::into_raw(Task::boxed(async move { + options.builder.handshake::<_, crate::Body>(io) + .await + .map(|(tx, conn)| { + options.exec.execute(Box::pin(async move { + let _ = conn.await; + })); + hyper_clientconn { tx } + }) + })) + } +} + +ffi_fn! { + /// Send a request on the client connection. + /// + /// Returns a task that needs to be polled until it is ready. When ready, the + /// task yields a `hyper_response *`. + fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut Task { + if conn.is_null() { + return std::ptr::null_mut(); + } + if req.is_null() { + return std::ptr::null_mut(); + } + + let req = unsafe { Box::from_raw(req) }; + let fut = unsafe { &mut *conn }.tx.send_request(req.0); + + let fut = async move { + fut.await.map(hyper_response) + }; + + Box::into_raw(Task::boxed(fut)) + } +} + +ffi_fn! { + /// Free a `hyper_clientconn *`. + fn hyper_clientconn_free(conn: *mut hyper_clientconn) { + drop(unsafe { Box::from_raw(conn) }); + } +} + +unsafe impl AsTaskType for hyper_clientconn { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_CLIENTCONN + } +} + +// ===== impl hyper_clientconn_options ===== + +ffi_fn! { + /// Creates a new set of HTTP clientconn options to be used in a handshake. 
+ fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { + Box::into_raw(Box::new(hyper_clientconn_options { + builder: conn::Builder::new(), + exec: WeakExec::new(), + })) + } +} + +ffi_fn! { + /// Free a `hyper_clientconn_options *`. + fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) { + drop(unsafe { Box::from_raw(opts) }); + } +} + +ffi_fn! { + /// Set the client background task executor. + /// + /// This does not consume the `options` or the `exec`. + fn hyper_clientconn_options_exec(opts: *mut hyper_clientconn_options, exec: *const Exec) { + let opts = unsafe { &mut *opts }; + + let exec = unsafe { Arc::from_raw(exec) }; + let weak_exec = Exec::downgrade(&exec); + std::mem::forget(exec); + + opts.builder.executor(weak_exec.clone()); + opts.exec = weak_exec; + } +} + +ffi_fn! { + /// Set the whether to use HTTP2. + /// + /// Pass `0` to disable, `1` to enable. + fn hyper_clientconn_options_http2(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { + #[cfg(feature = "http2")] + { + let opts = unsafe { &mut *opts }; + opts.builder.http2_only(enabled != 0); + hyper_code::HYPERE_OK + } + + #[cfg(not(feature = "http2"))] + { + drop(opts); + drop(enabled); + hyper_code::HYPERE_FEATURE_NOT_ENABLED + } + } +} diff --git a/src/ffi/error.rs b/src/ffi/error.rs new file mode 100644 index 0000000000..8cd672fe1e --- /dev/null +++ b/src/ffi/error.rs @@ -0,0 +1,80 @@ +use libc::size_t; + +pub struct hyper_error(crate::Error); + +#[repr(C)] +pub enum hyper_code { + /// All is well. + HYPERE_OK, + /// General error, details in the `hyper_error *`. + HYPERE_ERROR, + /// A function argument was invalid. + HYPERE_INVALID_ARG, + /// The IO transport returned an EOF when one wasn't expected. + /// + /// This typically means an HTTP request or response was expected, but the + /// connection closed cleanly without sending (all of) it. + HYPERE_UNEXPECTED_EOF, + /// Aborted by a user supplied callback. + HYPERE_ABORTED_BY_CALLBACK, + /// An optional hyper feature was not enabled. + #[cfg_attr(feature = "http2", allow(unused))] + HYPERE_FEATURE_NOT_ENABLED, +} + +// ===== impl hyper_error ===== + +impl hyper_error { + fn code(&self) -> hyper_code { + use crate::error::Kind as ErrorKind; + use crate::error::User; + + match self.0.kind() { + ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF, + ErrorKind::User(User::AbortedByCallback) => hyper_code::HYPERE_ABORTED_BY_CALLBACK, + // TODO: add more variants + _ => hyper_code::HYPERE_ERROR + } + } + + fn print_to(&self, dst: &mut [u8]) -> usize { + use std::io::Write; + + let mut dst = std::io::Cursor::new(dst); + + // A write! error doesn't matter. As much as possible will have been + // written, and the Cursor position will know how far that is (even + // if that is zero). + let _ = write!(dst, "{}", &self.0); + dst.position() as usize + } +} + +ffi_fn! { + /// Frees a `hyper_error`. + fn hyper_error_free(err: *mut hyper_error) { + drop(unsafe { Box::from_raw(err) }); + } +} + +ffi_fn! { + /// Get an equivalent `hyper_code` from this error. + fn hyper_error_code(err: *const hyper_error) -> hyper_code { + unsafe { &*err }.code() + } +} + +ffi_fn! { + /// Print the details of this error to a buffer. + /// + /// The `dst_len` value must be the maximum length that the buffer can + /// store. + /// + /// The return value is number of bytes that were written to `dst`. 
+ fn hyper_error_print(err: *const hyper_error, dst: *mut u8, dst_len: size_t) -> size_t { + let dst = unsafe { + std::slice::from_raw_parts_mut(dst, dst_len) + }; + unsafe { &*err }.print_to(dst) + } +} diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs new file mode 100644 index 0000000000..49e2027cac --- /dev/null +++ b/src/ffi/http_types.rs @@ -0,0 +1,267 @@ +use libc::{c_int, size_t}; +use std::ffi::c_void; + +use super::body::hyper_body; +use super::error::hyper_code; +use super::task::{hyper_task_return_type, AsTaskType}; +use super::HYPER_ITER_CONTINUE; +use crate::header::{HeaderName, HeaderValue}; +use crate::{Body, HeaderMap, Method, Request, Response, Uri}; + +// ===== impl Request ===== + +pub struct hyper_request(pub(super) Request); + +pub struct hyper_response(pub(super) Response); + +pub struct hyper_headers(pub(super) HeaderMap); + +ffi_fn! { + /// Construct a new HTTP request. + fn hyper_request_new() -> *mut hyper_request { + Box::into_raw(Box::new(hyper_request(Request::new(Body::empty())))) + } +} + +ffi_fn! { + /// Free an HTTP request if not going to send it on a client. + fn hyper_request_free(req: *mut hyper_request) { + drop(unsafe { Box::from_raw(req) }); + } +} + +ffi_fn! { + /// Set the HTTP Method of the request. + fn hyper_request_set_method(req: *mut hyper_request, method: *const u8, method_len: size_t) -> hyper_code { + let bytes = unsafe { + std::slice::from_raw_parts(method, method_len as usize) + }; + match Method::from_bytes(bytes) { + Ok(m) => { + *unsafe { &mut *req }.0.method_mut() = m; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + +ffi_fn! { + /// Set the URI of the request. + fn hyper_request_set_uri(req: *mut hyper_request, uri: *const u8, uri_len: size_t) -> hyper_code { + let bytes = unsafe { + std::slice::from_raw_parts(uri, uri_len as usize) + }; + match Uri::from_maybe_shared(bytes) { + Ok(u) => { + *unsafe { &mut *req }.0.uri_mut() = u; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + +ffi_fn! { + /// Set the preferred HTTP version of the request. + /// + /// The version value should be one of the `HYPER_HTTP_VERSION_` constants. + /// + /// Note that this won't change the major HTTP version of the connection, + /// since that is determined at the handshake step. + fn hyper_request_set_version(req: *mut hyper_request, version: c_int) -> hyper_code { + use http::Version; + + *unsafe { &mut *req }.0.version_mut() = match version { + super::HYPER_HTTP_VERSION_NONE => Version::HTTP_11, + super::HYPER_HTTP_VERSION_1_0 => Version::HTTP_10, + super::HYPER_HTTP_VERSION_1_1 => Version::HTTP_11, + super::HYPER_HTTP_VERSION_2 => Version::HTTP_2, + _ => { + // We don't know this version + return hyper_code::HYPERE_INVALID_ARG; + } + }; + hyper_code::HYPERE_OK + } +} + +ffi_fn! { + /// Gets a reference to the HTTP headers of this request + /// + /// This is not an owned reference, so it should not be accessed after the + /// `hyper_request` has been consumed. + fn hyper_request_headers(req: *mut hyper_request) -> *mut hyper_headers { + hyper_headers::wrap(unsafe { &mut *req }.0.headers_mut()) + } +} + +ffi_fn! { + /// Set the body of the request. + /// + /// The default is an empty body. + /// + /// This takes ownership of the `hyper_body *`, you must not use it or + /// free it after setting it on the request. 
+ fn hyper_request_set_body(req: *mut hyper_request, body: *mut hyper_body) -> hyper_code { + let body = unsafe { Box::from_raw(body) }; + *unsafe { &mut *req }.0.body_mut() = body.0; + hyper_code::HYPERE_OK + } +} + +// ===== impl Response ===== + +ffi_fn! { + /// Free an HTTP response after using it. + fn hyper_response_free(resp: *mut hyper_response) { + drop(unsafe { Box::from_raw(resp) }); + } +} + +ffi_fn! { + /// Get the HTTP-Status code of this response. + /// + /// It will always be within the range of 100-599. + fn hyper_response_status(resp: *const hyper_response) -> u16 { + unsafe { &*resp }.0.status().as_u16() + } +} + +ffi_fn! { + /// Get the HTTP version used by this response. + /// + /// The returned value could be: + /// + /// - `HYPER_HTTP_VERSION_1_0` + /// - `HYPER_HTTP_VERSION_1_1` + /// - `HYPER_HTTP_VERSION_2` + /// - `HYPER_HTTP_VERSION_NONE` if newer (or older). + fn hyper_response_version(resp: *const hyper_response) -> c_int { + use http::Version; + + match unsafe { &*resp }.0.version() { + Version::HTTP_10 => super::HYPER_HTTP_VERSION_1_0, + Version::HTTP_11 => super::HYPER_HTTP_VERSION_1_1, + Version::HTTP_2 => super::HYPER_HTTP_VERSION_2, + _ => super::HYPER_HTTP_VERSION_NONE, + } + } +} + +ffi_fn! { + /// Gets a reference to the HTTP headers of this response. + /// + /// This is not an owned reference, so it should not be accessed after the + /// `hyper_response` has been freed. + fn hyper_response_headers(resp: *mut hyper_response) -> *mut hyper_headers { + hyper_headers::wrap(unsafe { &mut *resp }.0.headers_mut()) + } +} + +ffi_fn! { + /// Take ownership of the body of this response. + /// + /// It is safe to free the response even after taking ownership of its body. + fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body { + let body = std::mem::take(unsafe { &mut *resp }.0.body_mut()); + Box::into_raw(Box::new(hyper_body(body))) + } +} + +unsafe impl AsTaskType for hyper_response { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_RESPONSE + } +} + +// ===== impl Headers ===== + +type hyper_headers_foreach_callback = + extern "C" fn(*mut c_void, *const u8, size_t, *const u8, size_t) -> c_int; + +impl hyper_headers { + pub(crate) fn wrap(cx: &mut HeaderMap) -> &mut hyper_headers { + // A struct with only one field has the same layout as that field. + unsafe { std::mem::transmute::<&mut HeaderMap, &mut hyper_headers>(cx) } + } +} + +ffi_fn! { + /// Iterates the headers passing each name and value pair to the callback. + /// + /// The `userdata` pointer is also passed to the callback. + /// + /// The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or + /// `HYPER_ITER_BREAK` to stop. + fn hyper_headers_foreach(headers: *const hyper_headers, func: hyper_headers_foreach_callback, userdata: *mut c_void) { + for (name, value) in unsafe { &*headers }.0.iter() { + let name_ptr = name.as_str().as_bytes().as_ptr(); + let name_len = name.as_str().as_bytes().len(); + let val_ptr = value.as_bytes().as_ptr(); + let val_len = value.as_bytes().len(); + + if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { + break; + } + } + } +} + +ffi_fn! { + /// Sets the header with the provided name to the provided value. + /// + /// This overwrites any previous value set for the header. 
+ fn hyper_headers_set(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { + let headers = unsafe { &mut *headers }; + match unsafe { raw_name_value(name, name_len, value, value_len) } { + Ok((name, value)) => { + headers.0.insert(name, value); + hyper_code::HYPERE_OK + } + Err(code) => code, + } + } +} + +ffi_fn! { + /// Adds the provided value to the list of the provided name. + /// + /// If there were already existing values for the name, this will append the + /// new value to the internal list. + fn hyper_headers_add(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { + let headers = unsafe { &mut *headers }; + + match unsafe { raw_name_value(name, name_len, value, value_len) } { + Ok((name, value)) => { + headers.0.append(name, value); + hyper_code::HYPERE_OK + } + Err(code) => code, + } + } +} + +unsafe fn raw_name_value( + name: *const u8, + name_len: size_t, + value: *const u8, + value_len: size_t, +) -> Result<(HeaderName, HeaderValue), hyper_code> { + let name = std::slice::from_raw_parts(name, name_len); + let name = match HeaderName::from_bytes(name) { + Ok(name) => name, + Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG), + }; + let value = std::slice::from_raw_parts(value, value_len); + let value = match HeaderValue::from_bytes(value) { + Ok(val) => val, + Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG), + }; + + Ok((name, value)) +} diff --git a/src/ffi/io.rs b/src/ffi/io.rs new file mode 100644 index 0000000000..5d84168486 --- /dev/null +++ b/src/ffi/io.rs @@ -0,0 +1,173 @@ +use std::ffi::c_void; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use libc::size_t; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::task::hyper_context; + +pub const HYPER_IO_PENDING: size_t = 0xFFFFFFFF; +pub const HYPER_IO_ERROR: size_t = 0xFFFFFFFE; + +type hyper_io_read_callback = + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut u8, size_t) -> size_t; +type hyper_io_write_callback = + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *const u8, size_t) -> size_t; + +pub struct Io { + read: hyper_io_read_callback, + write: hyper_io_write_callback, + userdata: *mut c_void, +} + +ffi_fn! { + /// Create a new IO type used to represent a transport. + /// + /// The read and write functions of this transport should be set with + /// `hyper_io_set_read` and `hyper_io_set_write`. + fn hyper_io_new() -> *mut Io { + Box::into_raw(Box::new(Io { + read: read_noop, + write: write_noop, + userdata: std::ptr::null_mut(), + })) + } +} + +ffi_fn! { + /// Free an unused `hyper_io *`. + /// + /// This is typically only useful if you aren't going to pass ownership + /// of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. + fn hyper_io_free(io: *mut Io) { + drop(unsafe { Box::from_raw(io) }); + } +} + +ffi_fn! { + /// Set the user data pointer for this IO to some value. + /// + /// This value is passed as an argument to the read and write callbacks. + fn hyper_io_set_userdata(io: *mut Io, data: *mut c_void) { + unsafe { &mut *io }.userdata = data; + } +} + +ffi_fn! { + /// Set the read function for this IO transport. + /// + /// Data that is read from the transport should be put in the `buf` pointer, + /// up to `buf_len` bytes. The number of bytes read should be the return value. + /// + /// It is undefined behavior to try to access the bytes in the `buf` pointer, + /// unless you have already written them yourself. 
It is also undefined behavior
+    /// to return that more bytes have been written than actually set on the `buf`.
+    ///
+    /// If there is no data currently available, a waker should be claimed from
+    /// the `ctx` and registered with whatever polling mechanism is used to signal
+    /// when data is available later on. The return value should be
+    /// `HYPER_IO_PENDING`.
+    ///
+    /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR`
+    /// should be the return value.
+    fn hyper_io_set_read(io: *mut Io, func: hyper_io_read_callback) {
+        unsafe { &mut *io }.read = func;
+    }
+}
+
+ffi_fn! {
+    /// Set the write function for this IO transport.
+    ///
+    /// Data from the `buf` pointer should be written to the transport, up to
+    /// `buf_len` bytes. The number of bytes written should be the return value.
+    ///
+    /// If no data can currently be written, the `waker` should be cloned and
+    /// registered with whatever polling mechanism is used to signal when data
+    /// is available later on. The return value should be `HYPER_IO_PENDING`.
+    ///
+    /// If there is an irrecoverable error writing data, then `HYPER_IO_ERROR`
+    /// should be the return value.
+    fn hyper_io_set_write(io: *mut Io, func: hyper_io_write_callback) {
+        unsafe { &mut *io }.write = func;
+    }
+}
+
+/// cbindgen:ignore
+extern "C" fn read_noop(
+    _userdata: *mut c_void,
+    _: *mut hyper_context<'_>,
+    _buf: *mut u8,
+    _buf_len: size_t,
+) -> size_t {
+    0
+}
+
+/// cbindgen:ignore
+extern "C" fn write_noop(
+    _userdata: *mut c_void,
+    _: *mut hyper_context<'_>,
+    _buf: *const u8,
+    _buf_len: size_t,
+) -> size_t {
+    0
+}
+
+impl AsyncRead for Io {
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut tokio::io::ReadBuf<'_>,
+    ) -> Poll<std::io::Result<()>> {
+        let buf_ptr = unsafe { buf.unfilled_mut() }.as_mut_ptr() as *mut u8;
+        let buf_len = buf.remaining();
+
+        match (self.read)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) {
+            HYPER_IO_PENDING => Poll::Pending,
+            HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new(
+                std::io::ErrorKind::Other,
+                "io error",
+            ))),
+            ok => {
+                // We have to trust that the user's read callback actually
+                // filled in that many bytes... :(
+                unsafe { buf.assume_init(ok) };
+                buf.advance(ok);
+                Poll::Ready(Ok(()))
+            }
+        }
+    }
+}
+
+impl AsyncWrite for Io {
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<std::io::Result<usize>> {
+        let buf_ptr = buf.as_ptr();
+        let buf_len = buf.len();
+
+        match (self.write)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) {
+            HYPER_IO_PENDING => Poll::Pending,
+            HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new(
+                std::io::ErrorKind::Other,
+                "io error",
+            ))),
+            ok => Poll::Ready(Ok(ok)),
+        }
+    }
+
+    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<std::io::Result<()>> {
+        Poll::Ready(Ok(()))
+    }
+
+    fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<std::io::Result<()>> {
+        Poll::Ready(Ok(()))
+    }
+}
+
+unsafe impl Send for Io {}
+unsafe impl Sync for Io {}
diff --git a/src/ffi/macros.rs b/src/ffi/macros.rs
new file mode 100644
index 0000000000..f4e031a07d
--- /dev/null
+++ b/src/ffi/macros.rs
@@ -0,0 +1,23 @@
+macro_rules!
ffi_fn { + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block) => { + $(#[$doc])* + #[no_mangle] + pub extern fn $name($($arg: $arg_ty),*) -> $ret { + use std::panic::{self, AssertUnwindSafe}; + + match panic::catch_unwind(AssertUnwindSafe(move || $body)) { + Ok(v) => v, + Err(_) => { + // TODO: We shouldn't abort, but rather figure out how to + // convert into the return type that the function errored. + eprintln!("panic unwind caught, aborting"); + std::process::abort(); + } + } + } + }; + + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body); + }; +} diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs new file mode 100644 index 0000000000..cee653d7d0 --- /dev/null +++ b/src/ffi/mod.rs @@ -0,0 +1,55 @@ +// We have a lot of c-types in here, stop warning about their names! +#![allow(non_camel_case_types)] + +// We may eventually allow the FFI to be enabled without `client` or `http1`, +// that is why we don't auto enable them as `ffi = ["client", "http1"]` in +// the `Cargo.toml`. +// +// But for now, give a clear message that this compile error is expected. +#[cfg(not(all(feature = "client", feature = "http1")))] +compile_error!("The `ffi` feature currently requires the `client` and `http1` features."); + +#[cfg(not(hyper_unstable_ffi))] +compile_error!( + "\ + The `ffi` feature is unstable, and requires the \ + `RUSTFLAGS='--cfg hyper_unstable_ffi'` environment variable to be set.\ +" +); + +#[macro_use] +mod macros; + +mod body; +mod client; +mod error; +mod http_types; +mod io; +mod task; + +pub(crate) use self::body::UserBody; + +pub const HYPER_ITER_CONTINUE: libc::c_int = 0; +#[allow(unused)] +pub const HYPER_ITER_BREAK: libc::c_int = 1; + +pub const HYPER_HTTP_VERSION_NONE: libc::c_int = 0; +pub const HYPER_HTTP_VERSION_1_0: libc::c_int = 10; +pub const HYPER_HTTP_VERSION_1_1: libc::c_int = 11; +pub const HYPER_HTTP_VERSION_2: libc::c_int = 20; + +struct UserDataPointer(*mut std::ffi::c_void); + +// We don't actually know anything about this pointer, it's up to the user +// to do the right thing. +unsafe impl Send for UserDataPointer {} + +/// cbindgen:ignore +static VERSION_CSTR: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); + +ffi_fn! { + /// Returns a static ASCII (null terminated) string of the hyper version. + fn hyper_version() -> *const libc::c_char { + VERSION_CSTR.as_ptr() as _ + } +} diff --git a/src/ffi/task.rs b/src/ffi/task.rs new file mode 100644 index 0000000000..61641bd193 --- /dev/null +++ b/src/ffi/task.rs @@ -0,0 +1,415 @@ +use std::ffi::c_void; +use std::future::Future; +use std::pin::Pin; +use std::ptr; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, Weak, +}; +use std::task::{Context, Poll}; + +use futures_util::stream::{FuturesUnordered, Stream}; +use libc::c_int; + +use super::error::hyper_code; +use super::UserDataPointer; + +type BoxFuture = Pin + Send>>; +type BoxAny = Box; + +pub const HYPER_POLL_READY: c_int = 0; +pub const HYPER_POLL_PENDING: c_int = 1; +pub const HYPER_POLL_ERROR: c_int = 3; + +pub struct Exec { + /// The executor of all task futures. + /// + /// There should never be contention on the mutex, as it is only locked + /// to drive the futures. However, we cannot gaurantee proper usage from + /// `hyper_executor_poll()`, which in C could potentially be called inside + /// one of the stored futures. 
The mutex isn't re-entrant, so doing so + /// would result in a deadlock, but that's better than data corruption. + driver: Mutex>, + + /// The queue of futures that need to be pushed into the `driver`. + /// + /// This is has a separate mutex since `spawn` could be called from inside + /// a future, which would mean the driver's mutex is already locked. + spawn_queue: Mutex>, + + /// This is used to track when a future calls `wake` while we are within + /// `Exec::poll_next`. + is_woken: Arc, +} + +#[derive(Clone)] +pub(crate) struct WeakExec(Weak); + +struct ExecWaker(AtomicBool); + +pub struct Task { + future: BoxFuture, + output: Option, + userdata: UserDataPointer, +} + +struct TaskFuture { + task: Option>, +} + +pub struct hyper_context<'a>(Context<'a>); + +pub struct hyper_waker { + waker: std::task::Waker, +} + +#[repr(C)] +pub enum hyper_task_return_type { + /// The value of this task is null (does not imply an error). + HYPER_TASK_EMPTY, + /// The value of this task is `hyper_error *`. + HYPER_TASK_ERROR, + /// The value of this task is `hyper_clientconn *`. + HYPER_TASK_CLIENTCONN, + /// The value of this task is `hyper_response *`. + HYPER_TASK_RESPONSE, + /// The value of this task is `hyper_buf *`. + HYPER_TASK_BUF, +} + +pub(crate) unsafe trait AsTaskType { + fn as_task_type(&self) -> hyper_task_return_type; +} + +pub(crate) trait IntoDynTaskType { + fn into_dyn_task_type(self) -> BoxAny; +} + +// ===== impl Exec ===== + +impl Exec { + fn new() -> Arc { + Arc::new(Exec { + driver: Mutex::new(FuturesUnordered::new()), + spawn_queue: Mutex::new(Vec::new()), + is_woken: Arc::new(ExecWaker(AtomicBool::new(false))), + }) + } + + pub(crate) fn downgrade(exec: &Arc) -> WeakExec { + WeakExec(Arc::downgrade(exec)) + } + + fn spawn(&self, task: Box) { + self.spawn_queue + .lock() + .unwrap() + .push(TaskFuture { task: Some(task) }); + } + + fn poll_next(&self) -> Option> { + // Drain the queue first. + self.drain_queue(); + + let waker = futures_util::task::waker_ref(&self.is_woken); + let mut cx = Context::from_waker(&waker); + + loop { + match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) { + Poll::Ready(val) => return val, + Poll::Pending => { + // Check if any of the pending tasks tried to spawn + // some new tasks. If so, drain into the driver and loop. + if self.drain_queue() { + continue; + } + + // If the driver called `wake` while we were polling, + // we should poll again immediately! + if self.is_woken.0.swap(false, Ordering::SeqCst) { + continue; + } + + return None; + } + } + } + } + + fn drain_queue(&self) -> bool { + let mut queue = self.spawn_queue.lock().unwrap(); + if queue.is_empty() { + return false; + } + + let driver = self.driver.lock().unwrap(); + + for task in queue.drain(..) { + driver.push(task); + } + + true + } +} + +impl futures_util::task::ArcWake for ExecWaker { + fn wake_by_ref(me: &Arc) { + me.0.store(true, Ordering::SeqCst); + } +} + +// ===== impl WeakExec ===== + +impl WeakExec { + pub(crate) fn new() -> Self { + WeakExec(Weak::new()) + } +} + +impl crate::rt::Executor> for WeakExec { + fn execute(&self, fut: BoxFuture<()>) { + if let Some(exec) = self.0.upgrade() { + exec.spawn(Task::boxed(fut)); + } + } +} + +ffi_fn! { + /// Creates a new task executor. + fn hyper_executor_new() -> *const Exec { + Arc::into_raw(Exec::new()) + } +} + +ffi_fn! { + /// Frees an executor and any incomplete tasks still part of it. + fn hyper_executor_free(exec: *const Exec) { + drop(unsafe { Arc::from_raw(exec) }); + } +} + +ffi_fn! 
{ + /// Push a task onto the executor. + /// + /// The executor takes ownership of the task, it should not be accessed + /// again unless returned back to the user with `hyper_executor_poll`. + fn hyper_executor_push(exec: *const Exec, task: *mut Task) -> hyper_code { + if exec.is_null() || task.is_null() { + return hyper_code::HYPERE_INVALID_ARG; + } + let exec = unsafe { &*exec }; + let task = unsafe { Box::from_raw(task) }; + exec.spawn(task); + hyper_code::HYPERE_OK + } +} + +ffi_fn! { + /// Polls the executor, trying to make progress on any tasks that have notified + /// that they are ready again. + /// + /// If ready, returns a task from the executor that has completed. + /// + /// If there are no ready tasks, this returns `NULL`. + fn hyper_executor_poll(exec: *const Exec) -> *mut Task { + // We only want an `&Arc` in here, so wrap in a `ManuallyDrop` so we + // don't accidentally trigger a ref_dec of the Arc. + let exec = unsafe { &*exec }; + match exec.poll_next() { + Some(task) => Box::into_raw(task), + None => ptr::null_mut(), + } + } +} + +// ===== impl Task ===== + +impl Task { + pub(crate) fn boxed(fut: F) -> Box + where + F: Future + Send + 'static, + F::Output: IntoDynTaskType + Send + Sync + 'static, + { + Box::new(Task { + future: Box::pin(async move { fut.await.into_dyn_task_type() }), + output: None, + userdata: UserDataPointer(ptr::null_mut()), + }) + } + + fn output_type(&self) -> hyper_task_return_type { + match self.output { + None => hyper_task_return_type::HYPER_TASK_EMPTY, + Some(ref val) => val.as_task_type(), + } + } +} + +impl Future for TaskFuture { + type Output = Box; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) { + Poll::Ready(val) => { + let mut task = self.task.take().unwrap(); + task.output = Some(val); + Poll::Ready(task) + } + Poll::Pending => Poll::Pending, + } + } +} + +ffi_fn! { + /// Free a task. + fn hyper_task_free(task: *mut Task) { + drop(unsafe { Box::from_raw(task) }); + } +} + +ffi_fn! { + /// Takes the output value of this task. + /// + /// This must only be called once polling the task on an executor has finished + /// this task. + /// + /// Use `hyper_task_type` to determine the type of the `void *` return value. + fn hyper_task_value(task: *mut Task) -> *mut c_void { + if task.is_null() { + return ptr::null_mut(); + } + + let task = unsafe { &mut *task }; + + if let Some(val) = task.output.take() { + let p = Box::into_raw(val) as *mut c_void; + // protect from returning fake pointers to empty types + if p == std::ptr::NonNull::::dangling().as_ptr() { + ptr::null_mut() + } else { + p + } + } else { + ptr::null_mut() + } + } +} + +ffi_fn! { + /// Query the return type of this task. + fn hyper_task_type(task: *mut Task) -> hyper_task_return_type { + if task.is_null() { + // instead of blowing up spectacularly, just say this null task + // doesn't have a value to retrieve. + return hyper_task_return_type::HYPER_TASK_EMPTY; + } + + unsafe { &*task }.output_type() + } +} + +ffi_fn! { + /// Set a user data pointer to be associated with this task. + /// + /// This value will be passed to task callbacks, and can be checked later + /// with `hyper_task_userdata`. + fn hyper_task_set_userdata(task: *mut Task, userdata: *mut c_void) { + if task.is_null() { + return; + } + + unsafe { (*task).userdata = UserDataPointer(userdata) }; + } +} + +ffi_fn! { + /// Retrieve the userdata that has been set via `hyper_task_set_userdata`. 
+ fn hyper_task_userdata(task: *mut Task) -> *mut c_void { + if task.is_null() { + return ptr::null_mut(); + } + + unsafe { &*task }.userdata.0 + } +} + +// ===== impl AsTaskType ===== + +unsafe impl AsTaskType for () { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_EMPTY + } +} + +unsafe impl AsTaskType for crate::Error { + fn as_task_type(&self) -> hyper_task_return_type { + hyper_task_return_type::HYPER_TASK_ERROR + } +} + +impl IntoDynTaskType for T +where + T: AsTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + Box::new(self) + } +} + +impl IntoDynTaskType for crate::Result +where + T: IntoDynTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + match self { + Ok(val) => val.into_dyn_task_type(), + Err(err) => Box::new(err), + } + } +} + +impl IntoDynTaskType for Option +where + T: IntoDynTaskType + Send + Sync + 'static, +{ + fn into_dyn_task_type(self) -> BoxAny { + match self { + Some(val) => val.into_dyn_task_type(), + None => ().into_dyn_task_type(), + } + } +} + +// ===== impl hyper_context ===== + +impl hyper_context<'_> { + pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> { + // A struct with only one field has the same layout as that field. + unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) } + } +} + +ffi_fn! { + /// Copies a waker out of the task context. + fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker { + let waker = unsafe { &mut *cx }.0.waker().clone(); + Box::into_raw(Box::new(hyper_waker { waker })) + } +} + +// ===== impl hyper_waker ===== + +ffi_fn! { + /// Free a waker that hasn't been woken. + fn hyper_waker_free(waker: *mut hyper_waker) { + drop(unsafe { Box::from_raw(waker) }); + } +} + +ffi_fn! { + /// Free a waker that hasn't been woken. + fn hyper_waker_wake(waker: *mut hyper_waker) { + let waker = unsafe { Box::from_raw(waker) }; + waker.waker.wake(); + } +} diff --git a/src/lib.rs b/src/lib.rs index 7268725e5f..e9b04229ab 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -87,6 +87,9 @@ pub mod rt; pub mod service; pub mod upgrade; +#[cfg(feature = "ffi")] +mod ffi; + cfg_proto! { mod headers; mod proto; diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index b9f658af9f..ab8616fec6 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -58,10 +58,10 @@ cfg_client! 
{ impl Dispatcher where D: Dispatch< - PollItem = MessageHead, - PollBody = Bs, - RecvItem = MessageHead, - > + Unpin, + PollItem = MessageHead, + PollBody = Bs, + RecvItem = MessageHead, + > + Unpin, D::PollError: Into>, I: AsyncRead + AsyncWrite + Unpin, T: Http1Transaction + Unpin, @@ -405,10 +405,10 @@ where impl Future for Dispatcher where D: Dispatch< - PollItem = MessageHead, - PollBody = Bs, - RecvItem = MessageHead, - > + Unpin, + PollItem = MessageHead, + PollBody = Bs, + RecvItem = MessageHead, + > + Unpin, D::PollError: Into>, I: AsyncRead + AsyncWrite + Unpin, T: Http1Transaction + Unpin, From 4c32daeea00b5ba6621a2ab9142c08f6ac9fe7ae Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 7 Jan 2021 17:22:12 -0800 Subject: [PATCH 008/420] refactor(ffi): Add HeaderCaseMap preserving http1 header casing --- capi/examples/client.c | 2 +- src/ffi/body.rs | 2 +- src/ffi/client.rs | 8 +- src/ffi/error.rs | 2 +- src/ffi/http_types.rs | 179 ++++++++++++++++++++++++++++++++++----- src/ffi/mod.rs | 1 + src/proto/h1/conn.rs | 16 ++++ src/proto/h1/dispatch.rs | 4 +- src/proto/h1/io.rs | 4 + src/proto/h1/mod.rs | 2 + src/proto/h1/role.rs | 137 ++++++++++++++++++++++++++++-- src/proto/mod.rs | 1 - 12 files changed, 322 insertions(+), 36 deletions(-) diff --git a/capi/examples/client.c b/capi/examples/client.c index 6ed66a46db..f8f1805fb7 100644 --- a/capi/examples/client.c +++ b/capi/examples/client.c @@ -228,7 +228,7 @@ int main(int argc, char *argv[]) { } hyper_headers *req_headers = hyper_request_headers(req); - hyper_headers_set(req_headers, STR_ARG("host"), STR_ARG(host)); + hyper_headers_set(req_headers, STR_ARG("Host"), STR_ARG(host)); // Send it! hyper_task *send = hyper_clientconn_send(client, req); diff --git a/src/ffi/body.rs b/src/ffi/body.rs index 1c8f1a48c0..14013fc3d0 100644 --- a/src/ffi/body.rs +++ b/src/ffi/body.rs @@ -24,7 +24,7 @@ pub(crate) struct UserBody { type hyper_body_foreach_callback = extern "C" fn(*mut c_void, *const hyper_buf) -> c_int; type hyper_body_data_callback = - extern "C" fn(*mut c_void, *mut hyper_context, *mut *mut hyper_buf) -> c_int; + extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut *mut hyper_buf) -> c_int; ffi_fn! { /// Create a new "empty" body. diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 2c2ef6b262..def4644141 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -67,11 +67,15 @@ ffi_fn! 
{ return std::ptr::null_mut(); } - let req = unsafe { Box::from_raw(req) }; + let mut req = unsafe { Box::from_raw(req) }; + + // Update request with original-case map of headers + req.finalize_request(); + let fut = unsafe { &mut *conn }.tx.send_request(req.0); let fut = async move { - fut.await.map(hyper_response) + fut.await.map(hyper_response::wrap) }; Box::into_raw(Task::boxed(fut)) diff --git a/src/ffi/error.rs b/src/ffi/error.rs index 8cd672fe1e..5dfca54ea9 100644 --- a/src/ffi/error.rs +++ b/src/ffi/error.rs @@ -33,7 +33,7 @@ impl hyper_error { ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF, ErrorKind::User(User::AbortedByCallback) => hyper_code::HYPERE_ABORTED_BY_CALLBACK, // TODO: add more variants - _ => hyper_code::HYPERE_ERROR + _ => hyper_code::HYPERE_ERROR, } } diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index 49e2027cac..fdf645cac1 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -1,3 +1,4 @@ +use bytes::Bytes; use libc::{c_int, size_t}; use std::ffi::c_void; @@ -8,13 +9,21 @@ use super::HYPER_ITER_CONTINUE; use crate::header::{HeaderName, HeaderValue}; use crate::{Body, HeaderMap, Method, Request, Response, Uri}; -// ===== impl Request ===== - pub struct hyper_request(pub(super) Request); pub struct hyper_response(pub(super) Response); -pub struct hyper_headers(pub(super) HeaderMap); +#[derive(Default)] +pub struct hyper_headers { + pub(super) headers: HeaderMap, + orig_casing: HeaderCaseMap, +} + +// Will probably be moved to `hyper::ext::http1` +#[derive(Debug, Default)] +pub(crate) struct HeaderCaseMap(HeaderMap); + +// ===== impl hyper_request ===== ffi_fn! { /// Construct a new HTTP request. @@ -96,7 +105,7 @@ ffi_fn! { /// This is not an owned reference, so it should not be accessed after the /// `hyper_request` has been consumed. fn hyper_request_headers(req: *mut hyper_request) -> *mut hyper_headers { - hyper_headers::wrap(unsafe { &mut *req }.0.headers_mut()) + hyper_headers::get_or_default(unsafe { &mut *req }.0.extensions_mut()) } } @@ -114,7 +123,16 @@ ffi_fn! { } } -// ===== impl Response ===== +impl hyper_request { + pub(super) fn finalize_request(&mut self) { + if let Some(headers) = self.0.extensions_mut().remove::() { + *self.0.headers_mut() = headers.headers; + self.0.extensions_mut().insert(headers.orig_casing); + } + } +} + +// ===== impl hyper_response ===== ffi_fn! { /// Free an HTTP response after using it. @@ -159,7 +177,7 @@ ffi_fn! { /// This is not an owned reference, so it should not be accessed after the /// `hyper_response` has been freed. fn hyper_response_headers(resp: *mut hyper_response) -> *mut hyper_headers { - hyper_headers::wrap(unsafe { &mut *resp }.0.headers_mut()) + hyper_headers::get_or_default(unsafe { &mut *resp }.0.extensions_mut()) } } @@ -173,6 +191,22 @@ ffi_fn! 
{ } } +impl hyper_response { + pub(super) fn wrap(mut resp: Response) -> hyper_response { + let headers = std::mem::take(resp.headers_mut()); + let orig_casing = resp + .extensions_mut() + .remove::() + .unwrap_or_default(); + resp.extensions_mut().insert(hyper_headers { + headers, + orig_casing, + }); + + hyper_response(resp) + } +} + unsafe impl AsTaskType for hyper_response { fn as_task_type(&self) -> hyper_task_return_type { hyper_task_return_type::HYPER_TASK_RESPONSE @@ -185,9 +219,15 @@ type hyper_headers_foreach_callback = extern "C" fn(*mut c_void, *const u8, size_t, *const u8, size_t) -> c_int; impl hyper_headers { - pub(crate) fn wrap(cx: &mut HeaderMap) -> &mut hyper_headers { - // A struct with only one field has the same layout as that field. - unsafe { std::mem::transmute::<&mut HeaderMap, &mut hyper_headers>(cx) } + pub(super) fn get_or_default(ext: &mut http::Extensions) -> &mut hyper_headers { + if let None = ext.get_mut::() { + ext.insert(hyper_headers { + headers: Default::default(), + orig_casing: Default::default(), + }); + } + + ext.get_mut::().unwrap() } } @@ -199,14 +239,31 @@ ffi_fn! { /// The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or /// `HYPER_ITER_BREAK` to stop. fn hyper_headers_foreach(headers: *const hyper_headers, func: hyper_headers_foreach_callback, userdata: *mut c_void) { - for (name, value) in unsafe { &*headers }.0.iter() { - let name_ptr = name.as_str().as_bytes().as_ptr(); - let name_len = name.as_str().as_bytes().len(); - let val_ptr = value.as_bytes().as_ptr(); - let val_len = value.as_bytes().len(); - - if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { - break; + let headers = unsafe { &*headers }; + // For each header name/value pair, there may be a value in the casemap + // that corresponds to the HeaderValue. So, we iterator all the keys, + // and for each one, try to pair the originally cased name with the value. + // + // TODO: consider adding http::HeaderMap::entries() iterator + for name in headers.headers.keys() { + let mut names = headers.orig_casing.get_all(name).iter(); + + for value in headers.headers.get_all(name) { + let (name_ptr, name_len) = if let Some(orig_name) = names.next() { + (orig_name.as_ptr(), orig_name.len()) + } else { + ( + name.as_str().as_bytes().as_ptr(), + name.as_str().as_bytes().len(), + ) + }; + + let val_ptr = value.as_bytes().as_ptr(); + let val_len = value.as_bytes().len(); + + if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { + return; + } } } } @@ -219,8 +276,9 @@ ffi_fn! { fn hyper_headers_set(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { let headers = unsafe { &mut *headers }; match unsafe { raw_name_value(name, name_len, value, value_len) } { - Ok((name, value)) => { - headers.0.insert(name, value); + Ok((name, value, orig_name)) => { + headers.headers.insert(&name, value); + headers.orig_casing.insert(name, orig_name); hyper_code::HYPERE_OK } Err(code) => code, @@ -237,8 +295,9 @@ ffi_fn! 
{ let headers = unsafe { &mut *headers }; match unsafe { raw_name_value(name, name_len, value, value_len) } { - Ok((name, value)) => { - headers.0.append(name, value); + Ok((name, value, orig_name)) => { + headers.headers.append(&name, value); + headers.orig_casing.append(name, orig_name); hyper_code::HYPERE_OK } Err(code) => code, @@ -251,8 +310,9 @@ unsafe fn raw_name_value( name_len: size_t, value: *const u8, value_len: size_t, -) -> Result<(HeaderName, HeaderValue), hyper_code> { +) -> Result<(HeaderName, HeaderValue, Bytes), hyper_code> { let name = std::slice::from_raw_parts(name, name_len); + let orig_name = Bytes::copy_from_slice(name); let name = match HeaderName::from_bytes(name) { Ok(name) => name, Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG), @@ -263,5 +323,78 @@ unsafe fn raw_name_value( Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG), }; - Ok((name, value)) + Ok((name, value, orig_name)) +} + +// ===== impl HeaderCaseMap ===== + +impl HeaderCaseMap { + pub(crate) fn get_all(&self, name: &HeaderName) -> http::header::GetAll<'_, Bytes> { + self.0.get_all(name) + } + + pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) { + self.0.insert(name, orig); + } + + pub(crate) fn append(&mut self, name: N, orig: Bytes) + where + N: http::header::IntoHeaderName, + { + self.0.append(name, orig); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_headers_foreach_cases_preserved() { + let mut headers = hyper_headers::default(); + + let name1 = b"Set-CookiE"; + let value1 = b"a=b"; + hyper_headers_add( + &mut headers, + name1.as_ptr(), + name1.len(), + value1.as_ptr(), + value1.len(), + ); + + let name2 = b"SET-COOKIE"; + let value2 = b"c=d"; + hyper_headers_add( + &mut headers, + name2.as_ptr(), + name2.len(), + value2.as_ptr(), + value2.len(), + ); + + let mut vec = Vec::::new(); + hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void); + + assert_eq!(vec, b"Set-CookiE: a=b\r\nSET-COOKIE: c=d\r\n"); + + extern "C" fn concat( + vec: *mut c_void, + name: *const u8, + name_len: usize, + value: *const u8, + value_len: usize, + ) -> c_int { + unsafe { + let vec = &mut *(vec as *mut Vec); + let name = std::slice::from_raw_parts(name, name_len); + let value = std::slice::from_raw_parts(value, value_len); + vec.extend(name); + vec.extend(b": "); + vec.extend(value); + vec.extend(b"\r\n"); + } + HYPER_ITER_CONTINUE + } + } } diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index cee653d7d0..ffa9d6b1a9 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -28,6 +28,7 @@ mod io; mod task; pub(crate) use self::body::UserBody; +pub(crate) use self::http_types::HeaderCaseMap; pub const HYPER_ITER_CONTINUE: libc::c_int = 0; #[allow(unused)] diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index 3226aaf885..9866e13369 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -44,6 +44,8 @@ where error: None, keep_alive: KA::Busy, method: None, + #[cfg(feature = "ffi")] + preserve_header_case: false, title_case_headers: false, notify_read: false, reading: Reading::Init, @@ -142,6 +144,8 @@ where ParseContext { cached_headers: &mut self.state.cached_headers, req_method: &mut self.state.method, + #[cfg(feature = "ffi")] + preserve_header_case: self.state.preserve_header_case, } )) { Ok(msg) => msg, @@ -474,6 +478,16 @@ where self.enforce_version(&mut head); + // Maybe check if we should preserve header casing on received + // message headers... 
+ #[cfg(feature = "ffi")] + { + if T::is_client() && !self.state.preserve_header_case { + self.state.preserve_header_case = + head.extensions.get::().is_some(); + } + } + let buf = self.io.headers_buf(); match super::role::encode_headers::( Encode { @@ -736,6 +750,8 @@ struct State { /// This is used to know things such as if the message can include /// a body or not. method: Option, + #[cfg(feature = "ffi")] + preserve_header_case: bool, title_case_headers: bool, /// Set to true when the Dispatcher should poll read operations /// again. See the `maybe_notify` method for more. diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index ab8616fec6..8bbb033346 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -492,7 +492,7 @@ cfg_server! { version: parts.version, subject: parts.status, headers: parts.headers, - extensions: http::Extensions::default(), + extensions: parts.extensions, }; Poll::Ready(Some(Ok((head, body)))) } else { @@ -576,7 +576,7 @@ cfg_client! { version: parts.version, subject: crate::proto::RequestLine(parts.method, parts.uri), headers: parts.headers, - extensions: http::Extensions::default(), + extensions: parts.extensions, }; *this.callback = Some(cb); Poll::Ready(Some(Ok((head, body)))) diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 85e4c016d9..da0ff82079 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -159,6 +159,8 @@ where ParseContext { cached_headers: parse_ctx.cached_headers, req_method: parse_ctx.req_method, + #[cfg(feature = "ffi")] + preserve_header_case: parse_ctx.preserve_header_case, }, )? { Some(msg) => { @@ -636,6 +638,8 @@ mod tests { let parse_ctx = ParseContext { cached_headers: &mut None, req_method: &mut None, + #[cfg(feature = "ffi")] + preserve_header_case: false, }; assert!(buffered .parse::(cx, parse_ctx) diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs index 4e1b1685b0..10aa09627b 100644 --- a/src/proto/h1/mod.rs +++ b/src/proto/h1/mod.rs @@ -70,6 +70,8 @@ pub(crate) struct ParsedMessage { pub(crate) struct ParseContext<'a> { cached_headers: &'a mut Option, req_method: &'a mut Option, + #[cfg(feature = "ffi")] + preserve_header_case: bool, } /// Passed to Http1Transaction::encode diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 2a3b1fdd29..95015bfff8 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -148,6 +148,7 @@ impl Http1Transaction for Server { is_http_11 = false; Version::HTTP_10 }; + trace!("headers: {:?}", &req.headers); record_header_indices(bytes, &req.headers, &mut headers_indices)?; headers_len = req.headers.len(); @@ -692,6 +693,9 @@ impl Http1Transaction for Client { let mut keep_alive = version == Version::HTTP_11; + #[cfg(feature = "ffi")] + let mut header_case_map = crate::ffi::HeaderCaseMap::default(); + headers.reserve(headers_len); for header in &headers_indices[..headers_len] { let name = header_name!(&slice[header.name.0..header.name.1]); @@ -707,14 +711,28 @@ impl Http1Transaction for Client { keep_alive = headers::connection_keep_alive(&value); } } + + #[cfg(feature = "ffi")] + if ctx.preserve_header_case { + header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); + } + headers.append(name, value); } + #[allow(unused_mut)] + let mut extensions = http::Extensions::default(); + + #[cfg(feature = "ffi")] + if ctx.preserve_header_case { + extensions.insert(header_case_map); + } + let head = MessageHead { version, subject: status, headers, - extensions: http::Extensions::default(), + extensions, }; if let Some((decode, 
is_upgrade)) = Client::decoder(&head, ctx.req_method)? { return Ok(Some(ParsedMessage { @@ -766,11 +784,28 @@ impl Http1Transaction for Client { } extend(dst, b"\r\n"); - if msg.title_case_headers { - write_headers_title_case(&msg.head.headers, dst); - } else { - write_headers(&msg.head.headers, dst); + #[cfg(feature = "ffi")] + { + if msg.title_case_headers { + write_headers_title_case(&msg.head.headers, dst); + } else if let Some(orig_headers) = + msg.head.extensions.get::() + { + write_headers_original_case(&msg.head.headers, orig_headers, dst); + } else { + write_headers(&msg.head.headers, dst); + } } + + #[cfg(not(feature = "ffi"))] + { + if msg.title_case_headers { + write_headers_title_case(&msg.head.headers, dst); + } else { + write_headers(&msg.head.headers, dst); + } + } + extend(dst, b"\r\n"); msg.head.headers.clear(); //TODO: remove when switching to drain() @@ -1081,6 +1116,40 @@ fn write_headers(headers: &HeaderMap, dst: &mut Vec) { } } +#[cfg(feature = "ffi")] +#[cold] +fn write_headers_original_case( + headers: &HeaderMap, + orig_case: &crate::ffi::HeaderCaseMap, + dst: &mut Vec, +) { + // For each header name/value pair, there may be a value in the casemap + // that corresponds to the HeaderValue. So, we iterator all the keys, + // and for each one, try to pair the originally cased name with the value. + // + // TODO: consider adding http::HeaderMap::entries() iterator + for name in headers.keys() { + let mut names = orig_case.get_all(name).iter(); + + for value in headers.get_all(name) { + if let Some(orig_name) = names.next() { + extend(dst, orig_name); + } else { + extend(dst, name.as_str().as_bytes()); + } + + // Wanted for curl test cases that send `X-Custom-Header:\r\n` + if value.is_empty() { + extend(dst, b":\r\n"); + } else { + extend(dst, b": "); + extend(dst, value.as_bytes()); + extend(dst, b"\r\n"); + } + } + } +} + struct FastWrite<'a>(&'a mut Vec); impl<'a> fmt::Write for FastWrite<'a> { @@ -1117,6 +1186,8 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut method, + #[cfg(feature = "ffi")] + preserve_header_case: false, }, ) .unwrap() @@ -1137,6 +1208,8 @@ mod tests { let ctx = ParseContext { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), + #[cfg(feature = "ffi")] + preserve_header_case: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw.len(), 0); @@ -1152,6 +1225,8 @@ mod tests { let ctx = ParseContext { cached_headers: &mut None, req_method: &mut None, + #[cfg(feature = "ffi")] + preserve_header_case: false, }; Server::parse(&mut raw, ctx).unwrap_err(); } @@ -1165,6 +1240,8 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut None, + #[cfg(feature = "ffi")] + preserve_header_case: false, }, ) .expect("parse ok") @@ -1178,6 +1255,8 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut None, + #[cfg(feature = "ffi")] + preserve_header_case: false, }, ) .expect_err(comment) @@ -1380,6 +1459,8 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + #[cfg(feature = "ffi")] + preserve_header_case: false, } ) .expect("parse ok") @@ -1393,6 +1474,8 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(m), + #[cfg(feature = "ffi")] + preserve_header_case: false, }, ) .expect("parse ok") @@ -1406,6 +1489,8 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + #[cfg(feature = "ffi")] + preserve_header_case: false, }, ) .expect_err("parse should 
err") @@ -1719,6 +1804,8 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + #[cfg(feature = "ffi")] + preserve_header_case: false, }, ) .expect("parse ok") @@ -1727,6 +1814,42 @@ mod tests { assert_eq!(parsed.head.headers["server"], "hello\tworld"); } + #[cfg(feature = "ffi")] + #[test] + fn test_write_headers_orig_case_empty_value() { + let mut headers = HeaderMap::new(); + let name = http::header::HeaderName::from_static("x-empty"); + headers.insert(&name, "".parse().expect("parse empty")); + let mut orig_cases = crate::ffi::HeaderCaseMap::default(); + orig_cases.insert(name, Bytes::from_static(b"X-EmptY")); + + let mut dst = Vec::new(); + super::write_headers_original_case(&headers, &orig_cases, &mut dst); + + assert_eq!( + dst, b"X-EmptY:\r\n", + "there should be no space between the colon and CRLF" + ); + } + + #[cfg(feature = "ffi")] + #[test] + fn test_write_headers_orig_case_multiple_entries() { + let mut headers = HeaderMap::new(); + let name = http::header::HeaderName::from_static("x-empty"); + headers.insert(&name, "a".parse().unwrap()); + headers.append(&name, "b".parse().unwrap()); + + let mut orig_cases = crate::ffi::HeaderCaseMap::default(); + orig_cases.insert(name.clone(), Bytes::from_static(b"X-Empty")); + orig_cases.append(name, Bytes::from_static(b"X-EMPTY")); + + let mut dst = Vec::new(); + super::write_headers_original_case(&headers, &orig_cases, &mut dst); + + assert_eq!(dst, b"X-Empty: a\r\nX-EMPTY: b\r\n"); + } + #[cfg(feature = "nightly")] use test::Bencher; @@ -1762,6 +1885,8 @@ mod tests { ParseContext { cached_headers: &mut headers, req_method: &mut None, + #[cfg(feature = "ffi")] + preserve_header_case: false, }, ) .unwrap() @@ -1795,6 +1920,8 @@ mod tests { ParseContext { cached_headers: &mut headers, req_method: &mut None, + #[cfg(feature = "ffi")] + preserve_header_case: false, }, ) .unwrap() diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 27b3ef6f12..fe2e2e92ba 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -24,7 +24,6 @@ pub struct MessageHead { pub subject: S, /// Headers of the Incoming message. pub headers: http::HeaderMap, - /// Extensions. extensions: http::Extensions, } From c9c46ed60bd43484d67a98a63496ad0277c791a2 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 7 Jan 2021 17:22:12 -0800 Subject: [PATCH 009/420] refactor(ffi): Add Reason-Phrase API This adds an internal ability to copy the HTTP/1 reason-phrase and place it in the `http::Extensions` of a response, if it doesn't match the canonical reason. This could be exposed in the Rust API later, but for now it is only used by the C API. 
--- capi/examples/client.c | 4 +++- capi/include/hyper.h | 20 ++++++++++++++++++++ src/ffi/http_types.rs | 39 +++++++++++++++++++++++++++++++++++++++ src/ffi/mod.rs | 2 +- src/proto/h1/role.rs | 27 +++++++++++++++++++++++++-- 5 files changed, 88 insertions(+), 4 deletions(-) diff --git a/capi/examples/client.c b/capi/examples/client.c index f8f1805fb7..0a1f71240c 100644 --- a/capi/examples/client.c +++ b/capi/examples/client.c @@ -254,8 +254,10 @@ int main(int argc, char *argv[]) { hyper_task_free(task); uint16_t http_status = hyper_response_status(resp); + const uint8_t *rp = hyper_response_reason_phrase(resp); + size_t rp_len = hyper_response_reason_phrase_len(resp); - printf("\nResponse Status: %d\n", http_status); + printf("\nResponse Status: %d %.*s\n", http_status, (int) rp_len, rp); hyper_headers *headers = hyper_response_headers(resp); hyper_headers_foreach(headers, print_each_header, NULL); diff --git a/capi/include/hyper.h b/capi/include/hyper.h index f2a6f8dbfb..78934710af 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -349,6 +349,26 @@ void hyper_response_free(hyper_response *resp); */ uint16_t hyper_response_status(const hyper_response *resp); +/* + Get a pointer to the reason-phrase of this response. + + This buffer is not null-terminated. + + This buffer is owned by the response, and should not be used after + the response has been freed. + + Use `hyper_response_reason_phrase_len()` to get the length of this + buffer. + */ +const uint8_t *hyper_response_reason_phrase(const hyper_response *resp); + +/* + Get the length of the reason-phrase of this response. + + Use `hyper_response_reason_phrase()` to get the buffer pointer. + */ +size_t hyper_response_reason_phrase_len(const hyper_response *resp); + /* Get the HTTP version used by this response. diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index fdf645cac1..6dba5a494c 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -23,6 +23,9 @@ pub struct hyper_headers { #[derive(Debug, Default)] pub(crate) struct HeaderCaseMap(HeaderMap); +#[derive(Debug)] +pub(crate) struct ReasonPhrase(pub(crate) Bytes); + // ===== impl hyper_request ===== ffi_fn! { @@ -150,6 +153,30 @@ ffi_fn! { } } +ffi_fn! { + /// Get a pointer to the reason-phrase of this response. + /// + /// This buffer is not null-terminated. + /// + /// This buffer is owned by the response, and should not be used after + /// the response has been freed. + /// + /// Use `hyper_response_reason_phrase_len()` to get the length of this + /// buffer. + fn hyper_response_reason_phrase(resp: *const hyper_response) -> *const u8 { + unsafe { &*resp }.reason_phrase().as_ptr() + } +} + +ffi_fn! { + /// Get the length of the reason-phrase of this response. + /// + /// Use `hyper_response_reason_phrase()` to get the buffer pointer. + fn hyper_response_reason_phrase_len(resp: *const hyper_response) -> size_t { + unsafe { &*resp }.reason_phrase().len() + } +} + ffi_fn! { /// Get the HTTP version used by this response. 
/// @@ -205,6 +232,18 @@ impl hyper_response { hyper_response(resp) } + + fn reason_phrase(&self) -> &[u8] { + if let Some(reason) = self.0.extensions().get::() { + return &reason.0; + } + + if let Some(reason) = self.0.status().canonical_reason() { + return reason.as_bytes(); + } + + &[] + } } unsafe impl AsTaskType for hyper_response { diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index ffa9d6b1a9..423a0776c6 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -28,7 +28,7 @@ mod io; mod task; pub(crate) use self::body::UserBody; -pub(crate) use self::http_types::HeaderCaseMap; +pub(crate) use self::http_types::{HeaderCaseMap, ReasonPhrase}; pub const HYPER_ITER_CONTINUE: libc::c_int = 0; #[allow(unused)] diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 95015bfff8..0c7eb1eecd 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -5,6 +5,8 @@ use std::fmt::{self, Write}; use std::mem; +#[cfg(feature = "ffi")] +use bytes::Bytes; use bytes::BytesMut; use http::header::{self, Entry, HeaderName, HeaderValue}; use http::{HeaderMap, Method, StatusCode, Version}; @@ -660,7 +662,7 @@ impl Http1Transaction for Client { loop { // Unsafe: see comment in Server Http1Transaction, above. let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() }; - let (len, status, version, headers_len) = { + let (len, status, reason, version, headers_len) = { let mut headers: [httparse::Header<'_>; MAX_HEADERS] = unsafe { mem::uninitialized() }; trace!( @@ -674,6 +676,20 @@ impl Http1Transaction for Client { httparse::Status::Complete(len) => { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; + + #[cfg(not(feature = "ffi"))] + let reason = (); + #[cfg(feature = "ffi")] + let reason = { + let reason = res.reason.unwrap(); + // Only save the reason phrase if it isnt the canonical reason + if Some(reason) != status.canonical_reason() { + Some(Bytes::copy_from_slice(reason.as_bytes())) + } else { + None + } + }; + let version = if res.version.unwrap() == 1 { Version::HTTP_11 } else { @@ -681,7 +697,7 @@ impl Http1Transaction for Client { }; record_header_indices(bytes, &res.headers, &mut headers_indices)?; let headers_len = res.headers.len(); - (len, status, version, headers_len) + (len, status, reason, version, headers_len) } httparse::Status::Partial => return Ok(None), } @@ -728,6 +744,13 @@ impl Http1Transaction for Client { extensions.insert(header_case_map); } + #[cfg(feature = "ffi")] + if let Some(reason) = reason { + extensions.insert(crate::ffi::ReasonPhrase(reason)); + } + #[cfg(not(feature = "ffi"))] + drop(reason); + let head = MessageHead { version, subject: status, From 446dd2f0c70dba22d8d589a68b224c2838a4ddf1 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 12 Jan 2021 14:45:10 -0800 Subject: [PATCH 010/420] chore(security): add a SECURITY.md policy (#2395) --- SECURITY.md | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..b074a574b4 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,9 @@ +# Security Policy + +hyper (and related projects in hyperium) use the same security policy as the [Tokio project][tokio-security]. + +## Report a security issue + +The process for reporting an issue is the same as the [Tokio project][tokio-security]. This includes private reporting via security@tokio.rs. 
+ +[tokio-security]: https://github.com/tokio-rs/tokio/security/policy From a15f3f7f0f536c74d51636bbc00f6b5ec110472b Mon Sep 17 00:00:00 2001 From: Markus Westerlind Date: Wed, 13 Jan 2021 01:18:22 +0100 Subject: [PATCH 011/420] refactor(client): minimize the amount of instantiated code (#2391) * Halve the amount of conn_task instantiations * Extract non-generic parts of client::handshake * Extract the non-generic parts of call_async --- src/client/connect/http.rs | 94 +++++++++++++++++++++----------------- src/proto/h2/client.rs | 61 ++++++++++++++----------- 2 files changed, 85 insertions(+), 70 deletions(-) diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs index eac980fa3d..734aea188a 100644 --- a/src/client/connect/http.rs +++ b/src/client/connect/http.rs @@ -272,54 +272,60 @@ where } } -impl HttpConnector -where - R: Resolve, -{ - async fn call_async(&mut self, dst: Uri) -> Result { - trace!( - "Http::connect; scheme={:?}, host={:?}, port={:?}", - dst.scheme(), - dst.host(), - dst.port(), - ); - - if self.config.enforce_http { - if dst.scheme() != Some(&Scheme::HTTP) { - return Err(ConnectError { - msg: INVALID_NOT_HTTP.into(), - cause: None, - }); - } - } else if dst.scheme().is_none() { +fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> { + trace!( + "Http::connect; scheme={:?}, host={:?}, port={:?}", + dst.scheme(), + dst.host(), + dst.port(), + ); + + if config.enforce_http { + if dst.scheme() != Some(&Scheme::HTTP) { return Err(ConnectError { - msg: INVALID_MISSING_SCHEME.into(), + msg: INVALID_NOT_HTTP.into(), cause: None, }); } + } else if dst.scheme().is_none() { + return Err(ConnectError { + msg: INVALID_MISSING_SCHEME.into(), + cause: None, + }); + } - let host = match dst.host() { - Some(s) => s, - None => { - return Err(ConnectError { - msg: INVALID_MISSING_HOST.into(), - cause: None, - }) - } - }; - let port = match dst.port() { - Some(port) => port.as_u16(), - None => { - if dst.scheme() == Some(&Scheme::HTTPS) { - 443 - } else { - 80 - } + let host = match dst.host() { + Some(s) => s, + None => { + return Err(ConnectError { + msg: INVALID_MISSING_HOST.into(), + cause: None, + }) + } + }; + let port = match dst.port() { + Some(port) => port.as_u16(), + None => { + if dst.scheme() == Some(&Scheme::HTTPS) { + 443 + } else { + 80 } - }; + } + }; + + Ok((host, port)) +} +impl HttpConnector +where + R: Resolve, +{ + async fn call_async(&mut self, dst: Uri) -> Result { let config = &self.config; + let (host, port) = get_host_port(config, &dst)?; + // If the host is already an IP addr (v4 or v6), // skip resolving the dns and start connecting right away. 
let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { @@ -328,10 +334,12 @@ where let addrs = resolve(&mut self.resolver, dns::Name::new(host.into())) .await .map_err(ConnectError::dns)?; - let addrs = addrs.map(|mut addr| { - addr.set_port(port); - addr - }).collect(); + let addrs = addrs + .map(|mut addr| { + addr.set_port(port); + addr + }) + .collect(); dns::SocketAddrs::new(addrs) }; diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs index 069232dffe..3c837feb9c 100644 --- a/src/proto/h2/client.rs +++ b/src/proto/h2/client.rs @@ -63,6 +63,32 @@ impl Default for Config { } } +fn new_builder(config: &Config) -> Builder { + let mut builder = Builder::default(); + builder + .initial_window_size(config.initial_stream_window_size) + .initial_connection_window_size(config.initial_conn_window_size) + .max_frame_size(config.max_frame_size) + .enable_push(false); + builder +} + +fn new_ping_config(config: &Config) -> ping::Config { + ping::Config { + bdp_initial_window: if config.adaptive_window { + Some(config.initial_stream_window_size) + } else { + None + }, + #[cfg(feature = "runtime")] + keep_alive_interval: config.keep_alive_interval, + #[cfg(feature = "runtime")] + keep_alive_timeout: config.keep_alive_timeout, + #[cfg(feature = "runtime")] + keep_alive_while_idle: config.keep_alive_while_idle, + } +} + pub(crate) async fn handshake( io: T, req_rx: ClientRx, @@ -74,11 +100,7 @@ where B: HttpBody, B::Data: Send + 'static, { - let (h2_tx, mut conn) = Builder::default() - .initial_window_size(config.initial_stream_window_size) - .initial_connection_window_size(config.initial_conn_window_size) - .max_frame_size(config.max_frame_size) - .enable_push(false) + let (h2_tx, mut conn) = new_builder(config) .handshake::<_, SendBuf>(io) .await .map_err(crate::Error::new_h2)?; @@ -96,21 +118,9 @@ where } }); - let ping_config = ping::Config { - bdp_initial_window: if config.adaptive_window { - Some(config.initial_stream_window_size) - } else { - None - }, - #[cfg(feature = "runtime")] - keep_alive_interval: config.keep_alive_interval, - #[cfg(feature = "runtime")] - keep_alive_timeout: config.keep_alive_timeout, - #[cfg(feature = "runtime")] - keep_alive_while_idle: config.keep_alive_while_idle, - }; + let ping_config = new_ping_config(&config); - let ping = if ping_config.is_enabled() { + let (conn, ping) = if ping_config.is_enabled() { let pp = conn.ping_pong().expect("conn.ping_pong"); let (recorder, mut ponger) = ping::channel(pp, ping_config); @@ -130,16 +140,13 @@ where Pin::new(&mut conn).poll(cx) }); - let conn = conn.map_err(|e| debug!("connection error: {}", e)); - - exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); - recorder + (Either::Left(conn), recorder) } else { - let conn = conn.map_err(|e| debug!("connection error: {}", e)); - - exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); - ping::disabled() + (Either::Right(conn), ping::disabled()) }; + let conn = conn.map_err(|e| debug!("connection error: {}", e)); + + exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); Ok(ClientTask { ping, From f0ddb669328163001fd18a4a21109e95047848bf Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Fri, 15 Jan 2021 02:57:55 +0900 Subject: [PATCH 012/420] refactor(lib): apply unreachable_pub lint (#2400) Closes #2390 --- src/client/conn.rs | 2 +- src/client/connect/dns.rs | 2 +- src/client/dispatch.rs | 32 +++++++++--------- src/client/mod.rs | 2 +- src/client/pool.rs | 8 ++--- src/common/date.rs | 6 ++-- src/common/drain.rs | 12 +++---- src/common/exec.rs | 2 
+- src/common/mod.rs | 8 ++++- src/common/never.rs | 2 +- src/common/sync_wrapper.rs | 8 ++--- src/error.rs | 68 +++++++++++++++++++------------------- src/ffi/mod.rs | 2 ++ src/headers.rs | 22 ++++++------ src/lib.rs | 1 + src/proto/h1/conn.rs | 56 +++++++++++++++---------------- src/proto/h1/decode.rs | 12 +++---- src/proto/h1/dispatch.rs | 16 ++++----- src/proto/h1/encode.rs | 24 +++++++------- src/proto/h1/io.rs | 38 ++++++++++----------- src/proto/h1/mod.rs | 8 ++--- src/proto/h2/server.rs | 2 +- src/proto/mod.rs | 16 ++++----- src/server/conn.rs | 6 ++-- src/server/tcp.rs | 1 + src/service/make.rs | 1 + src/service/mod.rs | 8 ++--- src/upgrade.rs | 16 ++++----- 28 files changed, 196 insertions(+), 185 deletions(-) diff --git a/src/client/conn.rs b/src/client/conn.rs index 23b548dacd..fd25741f0f 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -272,7 +272,7 @@ where ResponseFuture { inner } } - pub(crate) fn send_request_retryable( + pub(super) fn send_request_retryable( &mut self, req: Request, ) -> impl Future, (crate::Error, Option>)>> + Unpin diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs index 90d3d4a28a..08cbb1e883 100644 --- a/src/client/connect/dns.rs +++ b/src/client/connect/dns.rs @@ -346,7 +346,7 @@ mod sealed { } } -pub(crate) async fn resolve(resolver: &mut R, name: Name) -> Result +pub(super) async fn resolve(resolver: &mut R, name: Name) -> Result where R: Resolve, { diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs index a7e6311bad..804eebbfc2 100644 --- a/src/client/dispatch.rs +++ b/src/client/dispatch.rs @@ -6,10 +6,10 @@ use tokio::sync::{mpsc, oneshot}; use crate::common::{task, Pin, Poll}; -pub type RetryPromise = oneshot::Receiver)>>; -pub type Promise = oneshot::Receiver>; +pub(crate) type RetryPromise = oneshot::Receiver)>>; +pub(crate) type Promise = oneshot::Receiver>; -pub fn channel() -> (Sender, Receiver) { +pub(crate) fn channel() -> (Sender, Receiver) { let (tx, rx) = mpsc::unbounded_channel(); let (giver, taker) = want::new(); let tx = Sender { @@ -25,7 +25,7 @@ pub fn channel() -> (Sender, Receiver) { /// /// While the inner sender is unbounded, the Giver is used to determine /// if the Receiver is ready for another request. -pub struct Sender { +pub(crate) struct Sender { /// One message is always allowed, even if the Receiver hasn't asked /// for it yet. This boolean keeps track of whether we've sent one /// without notice. @@ -44,24 +44,24 @@ pub struct Sender { /// Cannot poll the Giver, but can still use it to determine if the Receiver /// has been dropped. However, this version can be cloned. #[cfg(feature = "http2")] -pub struct UnboundedSender { +pub(crate) struct UnboundedSender { /// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked. 
giver: want::SharedGiver, inner: mpsc::UnboundedSender>, } impl Sender { - pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { self.giver .poll_want(cx) .map_err(|_| crate::Error::new_closed()) } - pub fn is_ready(&self) -> bool { + pub(crate) fn is_ready(&self) -> bool { self.giver.is_wanting() } - pub fn is_closed(&self) -> bool { + pub(crate) fn is_closed(&self) -> bool { self.giver.is_canceled() } @@ -78,7 +78,7 @@ impl Sender { } } - pub fn try_send(&mut self, val: T) -> Result, T> { + pub(crate) fn try_send(&mut self, val: T) -> Result, T> { if !self.can_send() { return Err(val); } @@ -89,7 +89,7 @@ impl Sender { .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } - pub fn send(&mut self, val: T) -> Result, T> { + pub(crate) fn send(&mut self, val: T) -> Result, T> { if !self.can_send() { return Err(val); } @@ -101,7 +101,7 @@ impl Sender { } #[cfg(feature = "http2")] - pub fn unbound(self) -> UnboundedSender { + pub(crate) fn unbound(self) -> UnboundedSender { UnboundedSender { giver: self.giver.shared(), inner: self.inner, @@ -111,15 +111,15 @@ impl Sender { #[cfg(feature = "http2")] impl UnboundedSender { - pub fn is_ready(&self) -> bool { + pub(crate) fn is_ready(&self) -> bool { !self.giver.is_canceled() } - pub fn is_closed(&self) -> bool { + pub(crate) fn is_closed(&self) -> bool { self.giver.is_canceled() } - pub fn try_send(&mut self, val: T) -> Result, T> { + pub(crate) fn try_send(&mut self, val: T) -> Result, T> { let (tx, rx) = oneshot::channel(); self.inner .send(Envelope(Some((val, Callback::Retry(tx))))) @@ -139,7 +139,7 @@ impl Clone for UnboundedSender { } #[pin_project::pin_project(PinnedDrop)] -pub struct Receiver { +pub(crate) struct Receiver { #[pin] inner: mpsc::UnboundedReceiver>, taker: want::Taker, @@ -199,7 +199,7 @@ impl Drop for Envelope { } } -pub enum Callback { +pub(crate) enum Callback { Retry(oneshot::Sender)>>), NoRetry(oneshot::Sender>), } diff --git a/src/client/mod.rs b/src/client/mod.rs index 7f3006aa3b..9600a7640d 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -63,7 +63,7 @@ cfg_feature! { mod client; pub mod conn; - pub(crate) mod dispatch; + pub(super) mod dispatch; mod pool; pub mod service; } diff --git a/src/client/pool.rs b/src/client/pool.rs index bbee0344d5..0f22657bd4 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -100,7 +100,7 @@ impl Config { } impl Pool { - pub fn new(config: Config, __exec: &Exec) -> Pool { + pub(super) fn new(config: Config, __exec: &Exec) -> Pool { let inner = if config.is_enabled() { Some(Arc::new(Mutex::new(PoolInner { connecting: HashSet::new(), @@ -140,7 +140,7 @@ impl Pool { impl Pool { /// Returns a `Checkout` which is a future that resolves if an idle /// connection becomes available. 
- pub fn checkout(&self, key: Key) -> Checkout { + pub(super) fn checkout(&self, key: Key) -> Checkout { Checkout { key, pool: self.clone(), @@ -489,11 +489,11 @@ pub(super) struct Pooled { } impl Pooled { - pub fn is_reused(&self) -> bool { + pub(super) fn is_reused(&self) -> bool { self.is_reused } - pub fn is_pool_enabled(&self) -> bool { + pub(super) fn is_pool_enabled(&self) -> bool { self.pool.0.is_some() } diff --git a/src/common/date.rs b/src/common/date.rs index e8f9f7026b..a436fc07c0 100644 --- a/src/common/date.rs +++ b/src/common/date.rs @@ -8,17 +8,17 @@ use http::header::HeaderValue; use httpdate::HttpDate; // "Sun, 06 Nov 1994 08:49:37 GMT".len() -pub const DATE_VALUE_LENGTH: usize = 29; +pub(crate) const DATE_VALUE_LENGTH: usize = 29; #[cfg(feature = "http1")] -pub fn extend(dst: &mut Vec) { +pub(crate) fn extend(dst: &mut Vec) { CACHED.with(|cache| { dst.extend_from_slice(cache.borrow().buffer()); }) } #[cfg(feature = "http1")] -pub fn update() { +pub(crate) fn update() { CACHED.with(|cache| { cache.borrow_mut().check(); }) diff --git a/src/common/drain.rs b/src/common/drain.rs index 4f04fd6154..4bb2ecc118 100644 --- a/src/common/drain.rs +++ b/src/common/drain.rs @@ -5,19 +5,19 @@ use tokio::sync::watch; use super::{task, Future, Pin, Poll}; -pub fn channel() -> (Signal, Watch) { +pub(crate) fn channel() -> (Signal, Watch) { let (tx, rx) = watch::channel(()); (Signal { tx }, Watch { rx }) } -pub struct Signal { +pub(crate) struct Signal { tx: watch::Sender<()>, } -pub struct Draining(Pin + Send + Sync>>); +pub(crate) struct Draining(Pin + Send + Sync>>); #[derive(Clone)] -pub struct Watch { +pub(crate) struct Watch { rx: watch::Receiver<()>, } @@ -37,7 +37,7 @@ enum State { } impl Signal { - pub fn drain(self) -> Draining { + pub(crate) fn drain(self) -> Draining { let _ = self.tx.send(()); Draining(Box::pin(async move { self.tx.closed().await })) } @@ -52,7 +52,7 @@ impl Future for Draining { } impl Watch { - pub fn watch(self, future: F, on_drain: FN) -> Watching + pub(crate) fn watch(self, future: F, on_drain: FN) -> Watching where F: Future, FN: FnOnce(Pin<&mut F>), diff --git a/src/common/exec.rs b/src/common/exec.rs index 169a202a0d..c52482a336 100644 --- a/src/common/exec.rs +++ b/src/common/exec.rs @@ -24,7 +24,7 @@ pub trait NewSvcExec, E, W: Watcher>: Clone fn execute_new_svc(&mut self, fut: NewSvcTask); } -pub type BoxSendFuture = Pin + Send>>; +pub(crate) type BoxSendFuture = Pin + Send>>; // Either the user provides an executor for background tasks, or we use // `tokio::spawn`. 
diff --git a/src/common/mod.rs b/src/common/mod.rs index 4b1233bf3f..a5947315b8 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -29,7 +29,13 @@ pub(crate) mod watch; #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] pub(crate) use self::lazy::{lazy, Started as Lazy}; -pub use self::never::Never; +#[cfg(any( + feature = "client", + feature = "http1", + feature = "http2", + feature = "runtime" +))] +pub(crate) use self::never::Never; pub(crate) use self::task::Poll; // group up types normally needed for `Future` diff --git a/src/common/never.rs b/src/common/never.rs index f4fdb95ddd..f143caf60f 100644 --- a/src/common/never.rs +++ b/src/common/never.rs @@ -6,7 +6,7 @@ use std::error::Error; use std::fmt; #[derive(Debug)] -pub enum Never {} +pub(crate) enum Never {} impl fmt::Display for Never { fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/src/common/sync_wrapper.rs b/src/common/sync_wrapper.rs index 1e4aa4039c..05b11e2c4b 100644 --- a/src/common/sync_wrapper.rs +++ b/src/common/sync_wrapper.rs @@ -46,7 +46,7 @@ /// [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#method.poll /// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html #[repr(transparent)] -pub struct SyncWrapper(T); +pub(crate) struct SyncWrapper(T); impl SyncWrapper { /// Creates a new SyncWrapper containing the given value. @@ -58,7 +58,7 @@ impl SyncWrapper { /// /// let wrapped = SyncWrapper::new(42); /// ``` - pub fn new(value: T) -> Self { + pub(crate) fn new(value: T) -> Self { Self(value) } @@ -82,7 +82,7 @@ impl SyncWrapper { /// *value = 0; /// assert_eq!(*wrapped.get_mut(), 0); /// ``` - pub fn get_mut(&mut self) -> &mut T { + pub(crate) fn get_mut(&mut self) -> &mut T { &mut self.0 } @@ -105,7 +105,7 @@ impl SyncWrapper { /// assert_eq!(wrapped.into_inner(), 42); /// ``` #[allow(dead_code)] - pub fn into_inner(self) -> T { + pub(crate) fn into_inner(self) -> T { self.0 } } diff --git a/src/error.rs b/src/error.rs index 68b042f0c8..663156e0a9 100644 --- a/src/error.rs +++ b/src/error.rs @@ -18,7 +18,7 @@ struct ErrorImpl { } #[derive(Debug, PartialEq)] -pub(crate) enum Kind { +pub(super) enum Kind { Parse(Parse), User(User), /// A message reached EOF, but is not complete. @@ -64,7 +64,7 @@ pub(crate) enum Kind { } #[derive(Debug, PartialEq)] -pub(crate) enum Parse { +pub(super) enum Parse { Method, Version, #[cfg(feature = "http1")] @@ -76,7 +76,7 @@ pub(crate) enum Parse { } #[derive(Debug, PartialEq)] -pub(crate) enum User { +pub(super) enum User { /// Error calling user's HttpBody::poll_data(). #[cfg(any(feature = "http1", feature = "http2"))] Body, @@ -124,7 +124,7 @@ pub(crate) enum User { // Sentinel type to indicate the error was caused by a timeout. #[derive(Debug)] -pub(crate) struct TimedOut; +pub(super) struct TimedOut; impl Error { /// Returns true if this was an HTTP parse error. 
@@ -172,19 +172,19 @@ impl Error { self.inner.cause } - pub(crate) fn new(kind: Kind) -> Error { + pub(super) fn new(kind: Kind) -> Error { Error { inner: Box::new(ErrorImpl { kind, cause: None }), } } - pub(crate) fn with>(mut self, cause: C) -> Error { + pub(super) fn with>(mut self, cause: C) -> Error { self.inner.cause = Some(cause.into()); self } #[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))] - pub(crate) fn kind(&self) -> &Kind { + pub(super) fn kind(&self) -> &Kind { &self.inner.kind } @@ -202,7 +202,7 @@ impl Error { } #[cfg(feature = "http2")] - pub(crate) fn h2_reason(&self) -> h2::Reason { + pub(super) fn h2_reason(&self) -> h2::Reason { // Find an h2::Reason somewhere in the cause stack, if it exists, // otherwise assume an INTERNAL_ERROR. self.find_source::() @@ -210,68 +210,68 @@ impl Error { .unwrap_or(h2::Reason::INTERNAL_ERROR) } - pub(crate) fn new_canceled() -> Error { + pub(super) fn new_canceled() -> Error { Error::new(Kind::Canceled) } #[cfg(feature = "http1")] - pub(crate) fn new_incomplete() -> Error { + pub(super) fn new_incomplete() -> Error { Error::new(Kind::IncompleteMessage) } #[cfg(feature = "http1")] - pub(crate) fn new_too_large() -> Error { + pub(super) fn new_too_large() -> Error { Error::new(Kind::Parse(Parse::TooLarge)) } #[cfg(feature = "http1")] - pub(crate) fn new_version_h2() -> Error { + pub(super) fn new_version_h2() -> Error { Error::new(Kind::Parse(Parse::VersionH2)) } #[cfg(feature = "http1")] - pub(crate) fn new_unexpected_message() -> Error { + pub(super) fn new_unexpected_message() -> Error { Error::new(Kind::UnexpectedMessage) } #[cfg(any(feature = "http1", feature = "http2"))] - pub(crate) fn new_io(cause: std::io::Error) -> Error { + pub(super) fn new_io(cause: std::io::Error) -> Error { Error::new(Kind::Io).with(cause) } #[cfg(all(any(feature = "http1", feature = "http2"), feature = "tcp"))] #[cfg(feature = "server")] - pub(crate) fn new_listen>(cause: E) -> Error { + pub(super) fn new_listen>(cause: E) -> Error { Error::new(Kind::Listen).with(cause) } #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] - pub(crate) fn new_accept>(cause: E) -> Error { + pub(super) fn new_accept>(cause: E) -> Error { Error::new(Kind::Accept).with(cause) } #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] - pub(crate) fn new_connect>(cause: E) -> Error { + pub(super) fn new_connect>(cause: E) -> Error { Error::new(Kind::Connect).with(cause) } - pub(crate) fn new_closed() -> Error { + pub(super) fn new_closed() -> Error { Error::new(Kind::ChannelClosed) } #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] - pub(crate) fn new_body>(cause: E) -> Error { + pub(super) fn new_body>(cause: E) -> Error { Error::new(Kind::Body).with(cause) } #[cfg(any(feature = "http1", feature = "http2"))] - pub(crate) fn new_body_write>(cause: E) -> Error { + pub(super) fn new_body_write>(cause: E) -> Error { Error::new(Kind::BodyWrite).with(cause) } - pub(crate) fn new_body_write_aborted() -> Error { + pub(super) fn new_body_write_aborted() -> Error { Error::new(Kind::BodyWriteAborted) } @@ -281,71 +281,71 @@ impl Error { #[cfg(feature = "http1")] #[cfg(feature = "server")] - pub(crate) fn new_user_header() -> Error { + pub(super) fn new_user_header() -> Error { Error::new_user(User::UnexpectedHeader) } #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] - pub(crate) fn new_user_unsupported_version() -> Error { + pub(super) fn new_user_unsupported_version() 
-> Error { Error::new_user(User::UnsupportedVersion) } #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] - pub(crate) fn new_user_unsupported_request_method() -> Error { + pub(super) fn new_user_unsupported_request_method() -> Error { Error::new_user(User::UnsupportedRequestMethod) } #[cfg(feature = "http1")] #[cfg(feature = "server")] - pub(crate) fn new_user_unsupported_status_code() -> Error { + pub(super) fn new_user_unsupported_status_code() -> Error { Error::new_user(User::UnsupportedStatusCode) } #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] - pub(crate) fn new_user_absolute_uri_required() -> Error { + pub(super) fn new_user_absolute_uri_required() -> Error { Error::new_user(User::AbsoluteUriRequired) } - pub(crate) fn new_user_no_upgrade() -> Error { + pub(super) fn new_user_no_upgrade() -> Error { Error::new_user(User::NoUpgrade) } #[cfg(feature = "http1")] - pub(crate) fn new_user_manual_upgrade() -> Error { + pub(super) fn new_user_manual_upgrade() -> Error { Error::new_user(User::ManualUpgrade) } #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] - pub(crate) fn new_user_make_service>(cause: E) -> Error { + pub(super) fn new_user_make_service>(cause: E) -> Error { Error::new_user(User::MakeService).with(cause) } #[cfg(any(feature = "http1", feature = "http2"))] - pub(crate) fn new_user_service>(cause: E) -> Error { + pub(super) fn new_user_service>(cause: E) -> Error { Error::new_user(User::Service).with(cause) } #[cfg(any(feature = "http1", feature = "http2"))] - pub(crate) fn new_user_body>(cause: E) -> Error { + pub(super) fn new_user_body>(cause: E) -> Error { Error::new_user(User::Body).with(cause) } #[cfg(feature = "http1")] - pub(crate) fn new_shutdown(cause: std::io::Error) -> Error { + pub(super) fn new_shutdown(cause: std::io::Error) -> Error { Error::new(Kind::Shutdown).with(cause) } #[cfg(feature = "ffi")] - pub(crate) fn new_user_aborted_by_callback() -> Error { + pub(super) fn new_user_aborted_by_callback() -> Error { Error::new_user(User::AbortedByCallback) } #[cfg(feature = "http2")] - pub(crate) fn new_h2(cause: ::h2::Error) -> Error { + pub(super) fn new_h2(cause: ::h2::Error) -> Error { if cause.is_io() { Error::new_io(cause.into_io().expect("h2::Error::is_io")) } else { diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index 423a0776c6..b593c89d77 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -1,5 +1,7 @@ // We have a lot of c-types in here, stop warning about their names! #![allow(non_camel_case_types)] +// unreachable_pub warns `#[no_mangle] pub extern fn` in private mod. 
+#![allow(unreachable_pub)] // We may eventually allow the FFI to be enabled without `client` or `http1`, // that is why we don't auto enable them as `ffi = ["client", "http1"]` in diff --git a/src/headers.rs b/src/headers.rs index 8e06fd4b20..897aa05d01 100644 --- a/src/headers.rs +++ b/src/headers.rs @@ -8,12 +8,12 @@ use http::Method; use http::HeaderMap; #[cfg(feature = "http1")] -pub fn connection_keep_alive(value: &HeaderValue) -> bool { +pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool { connection_has(value, "keep-alive") } #[cfg(feature = "http1")] -pub fn connection_close(value: &HeaderValue) -> bool { +pub(super) fn connection_close(value: &HeaderValue) -> bool { connection_has(value, "close") } @@ -31,15 +31,15 @@ fn connection_has(value: &HeaderValue, needle: &str) -> bool { #[cfg(feature = "http1")] #[cfg(feature = "server")] -pub fn content_length_parse(value: &HeaderValue) -> Option { +pub(super) fn content_length_parse(value: &HeaderValue) -> Option { value.to_str().ok().and_then(|s| s.parse().ok()) } -pub fn content_length_parse_all(headers: &HeaderMap) -> Option { +pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option { content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter()) } -pub fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option { +pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option { // If multiple Content-Length headers were sent, everything can still // be alright if they all contain the same value, and all parse // correctly. If not, then it's an error. @@ -68,7 +68,7 @@ pub fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Op #[cfg(feature = "http2")] #[cfg(feature = "client")] -pub fn method_has_defined_payload_semantics(method: &Method) -> bool { +pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool { match *method { Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false, _ => true, @@ -76,19 +76,19 @@ pub fn method_has_defined_payload_semantics(method: &Method) -> bool { } #[cfg(feature = "http2")] -pub fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { +pub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { headers .entry(CONTENT_LENGTH) .or_insert_with(|| HeaderValue::from(len)); } #[cfg(feature = "http1")] -pub fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { +pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter()) } #[cfg(feature = "http1")] -pub fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { +pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { // chunked must always be the last encoding, according to spec if let Some(line) = encodings.next_back() { return is_chunked_(line); @@ -98,7 +98,7 @@ pub fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { } #[cfg(feature = "http1")] -pub fn is_chunked_(value: &HeaderValue) -> bool { +pub(super) fn is_chunked_(value: &HeaderValue) -> bool { // chunked must always be the last encoding, according to spec if let Ok(s) = value.to_str() { if let Some(encoding) = s.rsplit(',').next() { @@ -110,7 +110,7 @@ pub fn is_chunked_(value: &HeaderValue) -> bool { } #[cfg(feature = "http1")] -pub fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) { +pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, 
HeaderValue>) { const CHUNKED: &str = "chunked"; if let Some(line) = entry.iter_mut().next_back() { diff --git a/src/lib.rs b/src/lib.rs index e9b04229ab..8b16e31293 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,6 +2,7 @@ #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] +#![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))] #![cfg_attr(test, deny(warnings))] #![cfg_attr(all(test, feature = "nightly"), feature(test))] #![cfg_attr(docsrs, feature(doc_cfg))] diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index 9866e13369..174a1d8695 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -35,7 +35,7 @@ where B: Buf, T: Http1Transaction, { - pub fn new(io: I) -> Conn { + pub(crate) fn new(io: I) -> Conn { Conn { io: Buffered::new(io), state: State { @@ -60,21 +60,21 @@ where } #[cfg(feature = "server")] - pub fn set_flush_pipeline(&mut self, enabled: bool) { + pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { self.io.set_flush_pipeline(enabled); } - pub fn set_max_buf_size(&mut self, max: usize) { + pub(crate) fn set_max_buf_size(&mut self, max: usize) { self.io.set_max_buf_size(max); } #[cfg(feature = "client")] - pub fn set_read_buf_exact_size(&mut self, sz: usize) { + pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { self.io.set_read_buf_exact_size(sz); } #[cfg(feature = "client")] - pub fn set_title_case_headers(&mut self) { + pub(crate) fn set_title_case_headers(&mut self) { self.state.title_case_headers = true; } @@ -83,23 +83,23 @@ where self.state.allow_half_close = true; } - pub fn into_inner(self) -> (I, Bytes) { + pub(crate) fn into_inner(self) -> (I, Bytes) { self.io.into_inner() } - pub fn pending_upgrade(&mut self) -> Option { + pub(crate) fn pending_upgrade(&mut self) -> Option { self.state.upgrade.take() } - pub fn is_read_closed(&self) -> bool { + pub(crate) fn is_read_closed(&self) -> bool { self.state.is_read_closed() } - pub fn is_write_closed(&self) -> bool { + pub(crate) fn is_write_closed(&self) -> bool { self.state.is_write_closed() } - pub fn can_read_head(&self) -> bool { + pub(crate) fn can_read_head(&self) -> bool { match self.state.reading { Reading::Init => { if T::should_read_first() { @@ -115,7 +115,7 @@ where } } - pub fn can_read_body(&self) -> bool { + pub(crate) fn can_read_body(&self) -> bool { match self.state.reading { Reading::Body(..) | Reading::Continue(..) => true, _ => false, @@ -211,7 +211,7 @@ where } } - pub fn poll_read_body( + pub(crate) fn poll_read_body( &mut self, cx: &mut task::Context<'_>, ) -> Poll>> { @@ -268,13 +268,13 @@ where ret } - pub fn wants_read_again(&mut self) -> bool { + pub(crate) fn wants_read_again(&mut self) -> bool { let ret = self.state.notify_read; self.state.notify_read = false; ret } - pub fn poll_read_keep_alive(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_read_keep_alive(&mut self, cx: &mut task::Context<'_>) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body()); if self.is_read_closed() { @@ -412,7 +412,7 @@ where self.maybe_notify(cx); } - pub fn can_write_head(&self) -> bool { + pub(crate) fn can_write_head(&self) -> bool { if !T::should_read_first() { if let Reading::Closed = self.state.reading { return false; @@ -424,18 +424,18 @@ where } } - pub fn can_write_body(&self) -> bool { + pub(crate) fn can_write_body(&self) -> bool { match self.state.writing { Writing::Body(..) 
=> true, Writing::Init | Writing::KeepAlive | Writing::Closed => false, } } - pub fn can_buffer_body(&self) -> bool { + pub(crate) fn can_buffer_body(&self) -> bool { self.io.can_buffer() } - pub fn write_head(&mut self, head: MessageHead, body: Option) { + pub(crate) fn write_head(&mut self, head: MessageHead, body: Option) { if let Some(encoder) = self.encode_head(head, body) { self.state.writing = if !encoder.is_eof() { Writing::Body(encoder) @@ -447,7 +447,7 @@ where } } - pub fn write_full_msg(&mut self, head: MessageHead, body: B) { + pub(crate) fn write_full_msg(&mut self, head: MessageHead, body: B) { if let Some(encoder) = self.encode_head(head, Some(BodyLength::Known(body.remaining() as u64))) { @@ -555,7 +555,7 @@ where // the user's headers be. } - pub fn write_body(&mut self, chunk: B) { + pub(crate) fn write_body(&mut self, chunk: B) { debug_assert!(self.can_write_body() && self.can_buffer_body()); // empty chunks should be discarded at Dispatcher level debug_assert!(chunk.remaining() != 0); @@ -580,7 +580,7 @@ where self.state.writing = state; } - pub fn write_body_and_end(&mut self, chunk: B) { + pub(crate) fn write_body_and_end(&mut self, chunk: B) { debug_assert!(self.can_write_body() && self.can_buffer_body()); // empty chunks should be discarded at Dispatcher level debug_assert!(chunk.remaining() != 0); @@ -600,7 +600,7 @@ where self.state.writing = state; } - pub fn end_body(&mut self) -> crate::Result<()> { + pub(crate) fn end_body(&mut self) -> crate::Result<()> { debug_assert!(self.can_write_body()); let mut res = Ok(()); @@ -657,14 +657,14 @@ where Err(err) } - pub fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { ready!(Pin::new(&mut self.io).poll_flush(cx))?; self.try_keep_alive(cx); trace!("flushed({}): {:?}", T::LOG, self.state); Poll::Ready(Ok(())) } - pub fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) { Ok(()) => { trace!("shut down IO complete"); @@ -691,16 +691,16 @@ where } } - pub fn close_read(&mut self) { + pub(crate) fn close_read(&mut self) { self.state.close_read(); } - pub fn close_write(&mut self) { + pub(crate) fn close_write(&mut self) { self.state.close_write(); } #[cfg(feature = "server")] - pub fn disable_keep_alive(&mut self) { + pub(crate) fn disable_keep_alive(&mut self) { if self.state.is_idle() { trace!("disable_keep_alive; closing idle connection"); self.state.close(); @@ -710,7 +710,7 @@ where } } - pub fn take_error(&mut self) -> crate::Result<()> { + pub(crate) fn take_error(&mut self) -> crate::Result<()> { if let Some(err) = self.state.error.take() { Err(err) } else { diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs index 73b5dd4ded..4d2707783a 100644 --- a/src/proto/h1/decode.rs +++ b/src/proto/h1/decode.rs @@ -17,7 +17,7 @@ use self::Kind::{Chunked, Eof, Length}; /// If a message body does not include a Transfer-Encoding, it *should* /// include a Content-Length header. 
#[derive(Clone, PartialEq)] -pub struct Decoder { +pub(crate) struct Decoder { kind: Kind, } @@ -65,19 +65,19 @@ enum ChunkedState { impl Decoder { // constructors - pub fn length(x: u64) -> Decoder { + pub(crate) fn length(x: u64) -> Decoder { Decoder { kind: Kind::Length(x), } } - pub fn chunked() -> Decoder { + pub(crate) fn chunked() -> Decoder { Decoder { kind: Kind::Chunked(ChunkedState::Size, 0), } } - pub fn eof() -> Decoder { + pub(crate) fn eof() -> Decoder { Decoder { kind: Kind::Eof(false), } @@ -93,11 +93,11 @@ impl Decoder { // methods - pub fn is_eof(&self) -> bool { + pub(crate) fn is_eof(&self) -> bool { matches!(self.kind, Length(0) | Chunked(ChunkedState::End, _) | Eof(true)) } - pub fn decode( + pub(crate) fn decode( &mut self, cx: &mut task::Context<'_>, body: &mut R, diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index 8bbb033346..39f457efa3 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -37,7 +37,7 @@ pub(crate) trait Dispatch { cfg_server! { use crate::service::HttpService; - pub struct Server, B> { + pub(crate) struct Server, B> { in_flight: Pin>>, pub(crate) service: S, } @@ -45,7 +45,7 @@ cfg_server! { cfg_client! { #[pin_project::pin_project] - pub struct Client { + pub(crate) struct Client { callback: Option, http::Response>>, #[pin] rx: ClientRx, @@ -68,7 +68,7 @@ where Bs: HttpBody + 'static, Bs::Error: Into>, { - pub fn new(dispatch: D, conn: Conn) -> Self { + pub(crate) fn new(dispatch: D, conn: Conn) -> Self { Dispatcher { conn, dispatch, @@ -79,14 +79,14 @@ where } #[cfg(feature = "server")] - pub fn disable_keep_alive(&mut self) { + pub(crate) fn disable_keep_alive(&mut self) { self.conn.disable_keep_alive(); if self.conn.is_write_closed() { self.close(); } } - pub fn into_inner(self) -> (I, Bytes, D) { + pub(crate) fn into_inner(self) -> (I, Bytes, D) { let (io, buf) = self.conn.into_inner(); (io, buf, self.dispatch) } @@ -454,14 +454,14 @@ cfg_server! { where S: HttpService, { - pub fn new(service: S) -> Server { + pub(crate) fn new(service: S) -> Server { Server { in_flight: Box::pin(None), service, } } - pub fn into_service(self) -> S { + pub(crate) fn into_service(self) -> S { self.service } } @@ -538,7 +538,7 @@ cfg_server! { cfg_client! { impl Client { - pub fn new(rx: ClientRx) -> Client { + pub(crate) fn new(rx: ClientRx) -> Client { Client { callback: None, rx, diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs index c8ed99bbbd..6a3703994a 100644 --- a/src/proto/h1/encode.rs +++ b/src/proto/h1/encode.rs @@ -10,18 +10,18 @@ type StaticBuf = &'static [u8]; /// Encoders to handle different Transfer-Encodings. 
#[derive(Debug, Clone, PartialEq)] -pub struct Encoder { +pub(crate) struct Encoder { kind: Kind, is_last: bool, } #[derive(Debug)] -pub struct EncodedBuf { +pub(crate) struct EncodedBuf { kind: BufKind, } #[derive(Debug)] -pub struct NotEof; +pub(crate) struct NotEof; #[derive(Debug, PartialEq, Clone)] enum Kind { @@ -54,34 +54,34 @@ impl Encoder { is_last: false, } } - pub fn chunked() -> Encoder { + pub(crate) fn chunked() -> Encoder { Encoder::new(Kind::Chunked) } - pub fn length(len: u64) -> Encoder { + pub(crate) fn length(len: u64) -> Encoder { Encoder::new(Kind::Length(len)) } #[cfg(feature = "server")] - pub fn close_delimited() -> Encoder { + pub(crate) fn close_delimited() -> Encoder { Encoder::new(Kind::CloseDelimited) } - pub fn is_eof(&self) -> bool { + pub(crate) fn is_eof(&self) -> bool { matches!(self.kind, Kind::Length(0)) } #[cfg(feature = "server")] - pub fn set_last(mut self, is_last: bool) -> Self { + pub(crate) fn set_last(mut self, is_last: bool) -> Self { self.is_last = is_last; self } - pub fn is_last(&self) -> bool { + pub(crate) fn is_last(&self) -> bool { self.is_last } - pub fn is_close_delimited(&self) -> bool { + pub(crate) fn is_close_delimited(&self) -> bool { match self.kind { #[cfg(feature = "server")] Kind::CloseDelimited => true, @@ -89,7 +89,7 @@ impl Encoder { } } - pub fn end(&self) -> Result>, NotEof> { + pub(crate) fn end(&self) -> Result>, NotEof> { match self.kind { Kind::Length(0) => Ok(None), Kind::Chunked => Ok(Some(EncodedBuf { @@ -101,7 +101,7 @@ impl Encoder { } } - pub fn encode(&mut self, msg: B) -> EncodedBuf + pub(crate) fn encode(&mut self, msg: B) -> EncodedBuf where B: Buf, { diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index da0ff82079..5536b5d164 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -15,7 +15,7 @@ use crate::common::{task, Pin, Poll}; pub(crate) const INIT_BUFFER_SIZE: usize = 8192; /// The minimum value that can be set to max buffer size. -pub const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE; +pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE; /// The default maximum read buffer size. If the buffer gets this big and /// a message is still not complete, a `TooLarge` error is triggered. @@ -29,7 +29,7 @@ pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100; /// forces a flush if the queue gets this big. 
const MAX_BUF_LIST_BUFFERS: usize = 16; -pub struct Buffered { +pub(crate) struct Buffered { flush_pipeline: bool, io: T, read_blocked: bool, @@ -55,7 +55,7 @@ where T: AsyncRead + AsyncWrite + Unpin, B: Buf, { - pub fn new(io: T) -> Buffered { + pub(crate) fn new(io: T) -> Buffered { let write_buf = WriteBuf::new(&io); Buffered { flush_pipeline: false, @@ -68,7 +68,7 @@ where } #[cfg(feature = "server")] - pub fn set_flush_pipeline(&mut self, enabled: bool) { + pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { debug_assert!(!self.write_buf.has_remaining()); self.flush_pipeline = enabled; if enabled { @@ -76,7 +76,7 @@ where } } - pub fn set_max_buf_size(&mut self, max: usize) { + pub(crate) fn set_max_buf_size(&mut self, max: usize) { assert!( max >= MINIMUM_MAX_BUFFER_SIZE, "The max_buf_size cannot be smaller than {}.", @@ -87,19 +87,19 @@ where } #[cfg(feature = "client")] - pub fn set_read_buf_exact_size(&mut self, sz: usize) { + pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { self.read_buf_strategy = ReadStrategy::Exact(sz); } #[cfg(feature = "server")] - pub fn set_write_strategy_flatten(&mut self) { + pub(crate) fn set_write_strategy_flatten(&mut self) { // this should always be called only at construction time, // so this assert is here to catch myself debug_assert!(self.write_buf.queue.bufs_cnt() == 0); self.write_buf.set_strategy(WriteStrategy::Flatten); } - pub fn read_buf(&self) -> &[u8] { + pub(crate) fn read_buf(&self) -> &[u8] { self.read_buf.as_ref() } @@ -115,7 +115,7 @@ where self.read_buf.capacity() - self.read_buf.len() } - pub fn headers_buf(&mut self) -> &mut Vec { + pub(crate) fn headers_buf(&mut self) -> &mut Vec { let buf = self.write_buf.headers_mut(); &mut buf.bytes } @@ -124,15 +124,15 @@ where &mut self.write_buf } - pub fn buffer>(&mut self, buf: BB) { + pub(crate) fn buffer>(&mut self, buf: BB) { self.write_buf.buffer(buf) } - pub fn can_buffer(&self) -> bool { + pub(crate) fn can_buffer(&self) -> bool { self.flush_pipeline || self.write_buf.can_buffer() } - pub fn consume_leading_lines(&mut self) { + pub(crate) fn consume_leading_lines(&mut self) { if !self.read_buf.is_empty() { let mut i = 0; while i < self.read_buf.len() { @@ -182,7 +182,7 @@ where } } - pub fn poll_read_from_io(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_read_from_io(&mut self, cx: &mut task::Context<'_>) -> Poll> { self.read_blocked = false; let next = self.read_buf_strategy.next(); if self.read_buf_remaining_mut() < next { @@ -212,19 +212,19 @@ where } } - pub fn into_inner(self) -> (T, Bytes) { + pub(crate) fn into_inner(self) -> (T, Bytes) { (self.io, self.read_buf.freeze()) } - pub fn io_mut(&mut self) -> &mut T { + pub(crate) fn io_mut(&mut self) -> &mut T { &mut self.io } - pub fn is_read_blocked(&self) -> bool { + pub(crate) fn is_read_blocked(&self) -> bool { self.read_blocked } - pub fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { if self.flush_pipeline && !self.read_buf.is_empty() { Poll::Ready(Ok(())) } else if self.write_buf.remaining() == 0 { @@ -293,7 +293,7 @@ where impl Unpin for Buffered {} // TODO: This trait is old... at least rename to PollBytes or something... 
-pub trait MemRead { +pub(crate) trait MemRead { fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll>; } @@ -402,7 +402,7 @@ impl Default for ReadStrategy { } #[derive(Clone)] -pub struct Cursor { +pub(crate) struct Cursor { bytes: T, pos: usize, } diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs index 10aa09627b..1498872ea8 100644 --- a/src/proto/h1/mod.rs +++ b/src/proto/h1/mod.rs @@ -5,11 +5,11 @@ use crate::body::DecodedLength; use crate::proto::{BodyLength, MessageHead}; pub(crate) use self::conn::Conn; -pub use self::decode::Decoder; +pub(crate) use self::decode::Decoder; pub(crate) use self::dispatch::Dispatcher; -pub use self::encode::{EncodedBuf, Encoder}; -pub use self::io::Cursor; //TODO: move out of h1::io -pub use self::io::MINIMUM_MAX_BUFFER_SIZE; +pub(crate) use self::encode::{EncodedBuf, Encoder}; + //TODO: move out of h1::io +pub(crate) use self::io::MINIMUM_MAX_BUFFER_SIZE; mod conn; mod decode; diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index 200ad9f8a5..eea52e3e4b 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -136,7 +136,7 @@ where } } - pub fn graceful_shutdown(&mut self) { + pub(crate) fn graceful_shutdown(&mut self) { trace!("graceful_shutdown"); match self.state { State::Handshaking { .. } => { diff --git a/src/proto/mod.rs b/src/proto/mod.rs index fe2e2e92ba..0c86336b32 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -17,33 +17,33 @@ cfg_http2! { /// An Incoming Message head. Includes request/status line, and headers. #[derive(Debug, Default)] -pub struct MessageHead { +pub(crate) struct MessageHead { /// HTTP version of the message. - pub version: http::Version, + pub(crate) version: http::Version, /// Subject (request line or status line) of Incoming message. - pub subject: S, + pub(crate) subject: S, /// Headers of the Incoming message. - pub headers: http::HeaderMap, + pub(crate) headers: http::HeaderMap, /// Extensions. extensions: http::Extensions, } /// An incoming request message. #[cfg(feature = "http1")] -pub type RequestHead = MessageHead; +pub(crate) type RequestHead = MessageHead; #[derive(Debug, Default, PartialEq)] #[cfg(feature = "http1")] -pub struct RequestLine(pub http::Method, pub http::Uri); +pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri); /// An incoming response message. #[cfg(feature = "http1")] #[cfg(feature = "client")] -pub type ResponseHead = MessageHead; +pub(crate) type ResponseHead = MessageHead; #[derive(Debug)] #[cfg(feature = "http1")] -pub enum BodyLength { +pub(crate) enum BodyLength { /// Content-Length Known(u64), /// Transfer-Encoding: chunked (if h1) diff --git a/src/server/conn.rs b/src/server/conn.rs index b94b5054b9..50b84dd0d0 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -147,7 +147,7 @@ pub(super) struct SpawnAll { // // See https://github.com/rust-lang/rust/issues/64705 #[pin] - pub serve: Serve, + pub(super) serve: Serve, } /// A future binding a connection with a Service. @@ -815,7 +815,7 @@ impl Default for ConnectionMode { impl Serve { /// Get a reference to the incoming stream. 
#[inline] - pub fn incoming_ref(&self) -> &I { + pub(super) fn incoming_ref(&self) -> &I { &self.incoming } @@ -1025,7 +1025,7 @@ pub(crate) mod spawn_all { } #[pin_project(project = StateProj)] - pub enum State, E, W: Watcher> { + pub(super) enum State, E, W: Watcher> { Connecting(#[pin] Connecting, W), Connected(#[pin] W::Future), } diff --git a/src/server/tcp.rs b/src/server/tcp.rs index c6cfc98937..91afc40120 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -8,6 +8,7 @@ use tokio::time::Sleep; use crate::common::{task, Future, Pin, Poll}; +#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 pub use self::addr_stream::AddrStream; use super::accept::Accept; diff --git a/src/service/make.rs b/src/service/make.rs index 074d66f1b9..63e6f298f1 100644 --- a/src/service/make.rs +++ b/src/service/make.rs @@ -177,6 +177,7 @@ impl fmt::Debug for MakeServiceFn { mod sealed { pub trait Sealed {} + #[allow(unreachable_pub)] // This is intentional. pub trait CantImpl {} #[allow(missing_debug_implementations)] diff --git a/src/service/mod.rs b/src/service/mod.rs index 2c2bf3aaa7..5f156d46cd 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -44,16 +44,16 @@ mod make; mod oneshot; mod util; -pub(crate) use self::http::HttpService; +pub(super) use self::http::HttpService; #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] -pub(crate) use self::make::MakeConnection; +pub(super) use self::make::MakeConnection; #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] -pub(crate) use self::make::MakeServiceRef; +pub(super) use self::make::MakeServiceRef; #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] -pub(crate) use self::oneshot::{oneshot, Oneshot}; +pub(super) use self::oneshot::{oneshot, Oneshot}; pub use self::make::make_service_fn; pub use self::util::service_fn; diff --git a/src/upgrade.rs b/src/upgrade.rs index a981b912ee..6004c1a31a 100644 --- a/src/upgrade.rs +++ b/src/upgrade.rs @@ -63,12 +63,12 @@ pub fn on(msg: T) -> OnUpgrade { } #[cfg(feature = "http1")] -pub(crate) struct Pending { +pub(super) struct Pending { tx: oneshot::Sender>, } #[cfg(feature = "http1")] -pub(crate) fn pending() -> (Pending, OnUpgrade) { +pub(super) fn pending() -> (Pending, OnUpgrade) { let (tx, rx) = oneshot::channel(); (Pending { tx }, OnUpgrade { rx: Some(rx) }) } @@ -77,7 +77,7 @@ pub(crate) fn pending() -> (Pending, OnUpgrade) { impl Upgraded { #[cfg(any(feature = "http1", test))] - pub(crate) fn new(io: T, read_buf: Bytes) -> Self + pub(super) fn new(io: T, read_buf: Bytes) -> Self where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -154,12 +154,12 @@ impl fmt::Debug for Upgraded { // ===== impl OnUpgrade ===== impl OnUpgrade { - pub(crate) fn none() -> Self { + pub(super) fn none() -> Self { OnUpgrade { rx: None } } #[cfg(feature = "http1")] - pub(crate) fn is_none(&self) -> bool { + pub(super) fn is_none(&self) -> bool { self.rx.is_none() } } @@ -189,14 +189,14 @@ impl fmt::Debug for OnUpgrade { #[cfg(feature = "http1")] impl Pending { - pub(crate) fn fulfill(self, upgraded: Upgraded) { + pub(super) fn fulfill(self, upgraded: Upgraded) { trace!("pending upgrade fulfill"); let _ = self.tx.send(Ok(upgraded)); } /// Don't fulfill the pending Upgrade, but instead signal that /// upgrades are handled manually. 
- pub(crate) fn manual(self) { + pub(super) fn manual(self) { trace!("pending upgrade handled manually"); let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade())); } @@ -221,7 +221,7 @@ impl StdError for UpgradeExpected {} // ===== impl Io ===== -pub(crate) trait Io: AsyncRead + AsyncWrite + Unpin + 'static { +pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static { fn __hyper_type_id(&self) -> TypeId { TypeId::of::() } From 257d6a99193c9404ce055727833e1490c23a7197 Mon Sep 17 00:00:00 2001 From: nickelc Date: Thu, 14 Jan 2021 22:56:49 +0100 Subject: [PATCH 013/420] docs(server): set doc_cfg attributes for `tcp` feature gated Server methods (#2401) --- src/server/server.rs | 58 ++++++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 26 deletions(-) diff --git a/src/server/server.rs b/src/server/server.rs index 24037a9014..48cc6e2803 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -52,37 +52,43 @@ impl Server { } } -#[cfg(feature = "tcp")] -impl Server { - /// Binds to the provided address, and returns a [`Builder`](Builder). - /// - /// # Panics - /// - /// This method will panic if binding to the address fails. For a method - /// to bind to an address and return a `Result`, see `Server::try_bind`. - pub fn bind(addr: &SocketAddr) -> Builder { - let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { - panic!("error binding to {}: {}", addr, e); - }); - Server::builder(incoming) - } +cfg_feature! { + #![all(feature = "tcp")] + + impl Server { + /// Binds to the provided address, and returns a [`Builder`](Builder). + /// + /// # Panics + /// + /// This method will panic if binding to the address fails. For a method + /// to bind to an address and return a `Result`, see `Server::try_bind`. + pub fn bind(addr: &SocketAddr) -> Builder { + let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { + panic!("error binding to {}: {}", addr, e); + }); + Server::builder(incoming) + } - /// Tries to bind to the provided address, and returns a [`Builder`](Builder). - pub fn try_bind(addr: &SocketAddr) -> crate::Result> { - AddrIncoming::new(addr).map(Server::builder) - } + /// Tries to bind to the provided address, and returns a [`Builder`](Builder). + pub fn try_bind(addr: &SocketAddr) -> crate::Result> { + AddrIncoming::new(addr).map(Server::builder) + } - /// Create a new instance from a `std::net::TcpListener` instance. - pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { - AddrIncoming::from_std(listener).map(Server::builder) + /// Create a new instance from a `std::net::TcpListener` instance. + pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { + AddrIncoming::from_std(listener).map(Server::builder) + } } } -#[cfg(feature = "tcp")] -impl Server { - /// Returns the local address that this server is bound to. - pub fn local_addr(&self) -> SocketAddr { - self.spawn_all.local_addr() +cfg_feature! { + #![all(feature = "tcp")] + + impl Server { + /// Returns the local address that this server is bound to. 
+ pub fn local_addr(&self) -> SocketAddr { + self.spawn_all.local_addr() + } } } From bf8d74ad1cf7d0b33b470b1e61625ebac56f9c4c Mon Sep 17 00:00:00 2001 From: Abdullah Eryuzlu <24809834+aeryz@users.noreply.github.com> Date: Fri, 15 Jan 2021 21:14:21 +0300 Subject: [PATCH 014/420] feat(body): add `send_trailers` to Body channel's `Sender` (#2387) Closes #2260 --- src/body/body.rs | 63 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 43 insertions(+), 20 deletions(-) diff --git a/src/body/body.rs b/src/body/body.rs index e50e9f123e..9c199fd2c8 100644 --- a/src/body/body.rs +++ b/src/body/body.rs @@ -5,8 +5,6 @@ use std::fmt; use bytes::Bytes; use futures_channel::mpsc; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "client")] use futures_channel::oneshot; use futures_core::Stream; // for mpsc::Receiver #[cfg(feature = "stream")] @@ -17,14 +15,16 @@ use http_body::{Body as HttpBody, SizeHint}; use super::DecodedLength; #[cfg(feature = "stream")] use crate::common::sync_wrapper::SyncWrapper; -use crate::common::{task, watch, Pin, Poll}; +use crate::common::Future; #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] -use crate::common::{Future, Never}; +use crate::common::Never; +use crate::common::{task, watch, Pin, Poll}; #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] use crate::proto::h2::ping; type BodySender = mpsc::Sender>; +type TrailersSender = oneshot::Sender; /// A stream of `Bytes`, used when receiving bodies. /// @@ -43,7 +43,8 @@ enum Kind { Chan { content_length: DecodedLength, want_tx: watch::Sender, - rx: mpsc::Receiver>, + data_rx: mpsc::Receiver>, + trailers_rx: oneshot::Receiver, }, #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] H2 { @@ -106,7 +107,8 @@ enum DelayEof { #[must_use = "Sender does nothing unless sent on"] pub struct Sender { want_rx: watch::Receiver, - tx: BodySender, + data_tx: BodySender, + trailers_tx: Option, } const WANT_PENDING: usize = 1; @@ -137,7 +139,8 @@ impl Body { } pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Body) { - let (tx, rx) = mpsc::channel(0); + let (data_tx, data_rx) = mpsc::channel(0); + let (trailers_tx, trailers_rx) = oneshot::channel(); // If wanter is true, `Sender::poll_ready()` won't becoming ready // until the `Body` has been polled for data once. @@ -145,11 +148,16 @@ impl Body { let (want_tx, want_rx) = watch::channel(want); - let tx = Sender { want_rx, tx }; + let tx = Sender { + want_rx, + data_tx, + trailers_tx: Some(trailers_tx), + }; let rx = Body::new(Kind::Chan { content_length, want_tx, - rx, + data_rx, + trailers_rx, }); (tx, rx) @@ -282,12 +290,13 @@ impl Body { Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)), Kind::Chan { content_length: ref mut len, - ref mut rx, + ref mut data_rx, ref mut want_tx, + .. } => { want_tx.send(WANT_READY); - match ready!(Pin::new(rx).poll_next(cx)?) { + match ready!(Pin::new(data_rx).poll_next(cx)?) { Some(chunk) => { len.sub_if(chunk.len() as u64); Poll::Ready(Some(Ok(chunk))) @@ -368,10 +377,15 @@ impl HttpBody for Body { } Err(e) => Poll::Ready(Err(crate::Error::new_h2(e))), }, - + Kind::Chan { + ref mut trailers_rx, + .. 
+ } => match ready!(Pin::new(trailers_rx).poll(cx)) { + Ok(t) => Poll::Ready(Ok(Some(t))), + Err(_) => Poll::Ready(Ok(None)), + }, #[cfg(feature = "ffi")] Kind::Ffi(ref mut body) => body.poll_trailers(cx), - _ => Poll::Ready(Ok(None)), } } @@ -527,7 +541,7 @@ impl Sender { pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { // Check if the receiver end has tried polling for the body yet ready!(self.poll_want(cx)?); - self.tx + self.data_tx .poll_ready(cx) .map_err(|_| crate::Error::new_closed()) } @@ -545,14 +559,23 @@ impl Sender { futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await } - /// Send data on this channel when it is ready. + /// Send data on data channel when it is ready. pub async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> { self.ready().await?; - self.tx + self.data_tx .try_send(Ok(chunk)) .map_err(|_| crate::Error::new_closed()) } + /// Send trailers on trailers channel. + pub async fn send_trailers(&mut self, trailers: HeaderMap) -> crate::Result<()> { + let tx = match self.trailers_tx.take() { + Some(tx) => tx, + None => return Err(crate::Error::new_closed()), + }; + tx.send(trailers).map_err(|_| crate::Error::new_closed()) + } + /// Try to send data on this channel. /// /// # Errors @@ -566,7 +589,7 @@ impl Sender { /// that doesn't have an async context. If in an async context, prefer /// `send_data()` instead. pub fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> { - self.tx + self.data_tx .try_send(Ok(chunk)) .map_err(|err| err.into_inner().expect("just sent Ok")) } @@ -574,7 +597,7 @@ impl Sender { /// Aborts the body in an abnormal fashion. pub fn abort(self) { let _ = self - .tx + .data_tx // clone so the send works even if buffer is full .clone() .try_send(Err(crate::Error::new_body_write_aborted())); @@ -582,7 +605,7 @@ impl Sender { #[cfg(feature = "http1")] pub(crate) fn send_error(&mut self, err: crate::Error) { - let _ = self.tx.try_send(Err(err)); + let _ = self.data_tx.try_send(Err(err)); } } @@ -628,7 +651,7 @@ mod tests { assert_eq!( mem::size_of::(), - mem::size_of::() * 4, + mem::size_of::() * 5, "Sender" ); From f821ecdd197b42253b4930f1d67ae98b5c2028ea Mon Sep 17 00:00:00 2001 From: nickelc Date: Sat, 16 Jan 2021 01:24:03 +0100 Subject: [PATCH 015/420] docs(body): set doc_cfg attribute for `Body::wrap_stream` (#2403) --- src/body/body.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/body/body.rs b/src/body/body.rs index 9c199fd2c8..a515e4b989 100644 --- a/src/body/body.rs +++ b/src/body/body.rs @@ -185,6 +185,7 @@ impl Body { /// This function requires enabling the `stream` feature in your /// `Cargo.toml`. 
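The new `Sender::send_trailers` method introduced above can be used roughly as follows. This is a sketch only: it assumes a Tokio runtime, that the returned `Body` is handed to a request or response that actually gets polled, and "grpc-status" is just an arbitrary example trailer name.

    use hyper::body::{Body, Bytes};
    use hyper::HeaderMap;

    fn body_with_trailers() -> Body {
        let (mut sender, body) = Body::channel();
        tokio::spawn(async move {
            // Stream one data chunk, then finish the body with a trailer.
            let _ = sender.send_data(Bytes::from_static(b"hello")).await;
            let mut trailers = HeaderMap::new();
            if let Ok(value) = "0".parse() {
                trailers.insert("grpc-status", value);
            }
            let _ = sender.send_trailers(trailers).await;
        });
        body
    }
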
#[cfg(feature = "stream")] + #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] pub fn wrap_stream(stream: S) -> Body where S: Stream> + Send + 'static, From ec2fd4a15f6c0ec612c38a9596c56cd87477d0f9 Mon Sep 17 00:00:00 2001 From: Mike Roelens Date: Tue, 19 Jan 2021 11:30:17 -0500 Subject: [PATCH 016/420] docs(client): fix typo in httpbin.org domain (#2406) --- src/client/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/client.rs b/src/client/client.rs index 3ab708471a..d213b99c8c 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -147,7 +147,7 @@ where /// /// let req = Request::builder() /// .method("POST") - /// .uri("http://httpin.org/post") + /// .uri("http://httpbin.org/post") /// .body(Body::from("Hallo!")) /// .expect("request builder"); /// From 9956587f83428a5dbe338ba0b55c1dc0bce8c282 Mon Sep 17 00:00:00 2001 From: erikdesjardins Date: Tue, 19 Jan 2021 17:50:07 -0500 Subject: [PATCH 017/420] feat(body): reexport `hyper::body::SizeHint` (#2404) --- src/body/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/body/mod.rs b/src/body/mod.rs index e9a5352c13..5e2181e941 100644 --- a/src/body/mod.rs +++ b/src/body/mod.rs @@ -17,6 +17,7 @@ pub use bytes::{Buf, Bytes}; pub use http_body::Body as HttpBody; +pub use http_body::SizeHint; pub use self::aggregate::aggregate; pub use self::body::{Body, Sender}; From 237b2ce08341266b62a8d1cfa974779c511a0710 Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Tue, 19 Jan 2021 23:51:34 +0100 Subject: [PATCH 018/420] refactor(lib): Remove useless uses of Pin (#2405) --- src/client/dispatch.rs | 22 +++++++++------------- src/proto/h1/dispatch.rs | 14 ++++++-------- src/proto/h2/client.rs | 2 +- 3 files changed, 16 insertions(+), 22 deletions(-) diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs index 804eebbfc2..17cfbf4f8c 100644 --- a/src/client/dispatch.rs +++ b/src/client/dispatch.rs @@ -138,25 +138,22 @@ impl Clone for UnboundedSender { } } -#[pin_project::pin_project(PinnedDrop)] pub(crate) struct Receiver { - #[pin] inner: mpsc::UnboundedReceiver>, taker: want::Taker, } impl Receiver { - pub(crate) fn poll_next( - self: Pin<&mut Self>, + pub(crate) fn poll_recv( + &mut self, cx: &mut task::Context<'_>, ) -> Poll)>> { - let mut this = self.project(); - match this.inner.poll_recv(cx) { + match self.inner.poll_recv(cx) { Poll::Ready(item) => { Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped"))) } Poll::Pending => { - this.taker.want(); + self.taker.want(); Poll::Pending } } @@ -177,12 +174,11 @@ impl Receiver { } } -#[pin_project::pinned_drop] -impl PinnedDrop for Receiver { - fn drop(mut self: Pin<&mut Self>) { +impl Drop for Receiver { + fn drop(&mut self) { // Notify the giver about the closure first, before dropping // the mpsc::Receiver. - self.as_mut().taker.cancel(); + self.taker.cancel(); } } @@ -279,8 +275,8 @@ mod tests { impl Future for Receiver { type Output = Option<(T, Callback)>; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.poll_next(cx) + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.poll_recv(cx) } } diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index 39f457efa3..88e641e9a4 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -44,10 +44,8 @@ cfg_server! { } cfg_client! { - #[pin_project::pin_project] pub(crate) struct Client { callback: Option, http::Response>>, - #[pin] rx: ClientRx, rx_closed: bool, } @@ -557,12 +555,12 @@ cfg_client! 
{ type RecvItem = crate::proto::ResponseHead; fn poll_msg( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll>> { - let this = self.project(); - debug_assert!(!*this.rx_closed); - match this.rx.poll_next(cx) { + let mut this = self.as_mut(); + debug_assert!(!this.rx_closed); + match this.rx.poll_recv(cx) { Poll::Ready(Some((req, mut cb))) => { // check that future hasn't been canceled already match cb.poll_canceled(cx) { @@ -578,7 +576,7 @@ cfg_client! { headers: parts.headers, extensions: parts.extensions, }; - *this.callback = Some(cb); + this.callback = Some(cb); Poll::Ready(Some(Ok((head, body)))) } } @@ -586,7 +584,7 @@ cfg_client! { Poll::Ready(None) => { // user has dropped sender handle trace!("client tx closed"); - *this.rx_closed = true; + this.rx_closed = true; Poll::Ready(None) } Poll::Pending => Poll::Pending, diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs index 3c837feb9c..4f583f2bfa 100644 --- a/src/proto/h2/client.rs +++ b/src/proto/h2/client.rs @@ -213,7 +213,7 @@ where } }; - match Pin::new(&mut self.req_rx).poll_next(cx) { + match self.req_rx.poll_recv(cx) { Poll::Ready(Some((req, cb))) => { // check that future hasn't been canceled already if cb.is_canceled() { From 1928682b33f98244435ba6d574677546205a15ec Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 20 Jan 2021 14:53:25 -0800 Subject: [PATCH 019/420] feat(ffi): add HYPERE_INVALID_PEER_MESSAGE error code for parse errors --- capi/include/hyper.h | 4 ++++ src/ffi/error.rs | 3 +++ 2 files changed, 7 insertions(+) diff --git a/capi/include/hyper.h b/capi/include/hyper.h index 78934710af..cfc14a25fa 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -54,6 +54,10 @@ typedef enum { An optional hyper feature was not enabled. */ HYPERE_FEATURE_NOT_ENABLED, + /* + The peer sent an HTTP message that could not be parsed. + */ + HYPERE_INVALID_PEER_MESSAGE, } hyper_code; typedef enum { diff --git a/src/ffi/error.rs b/src/ffi/error.rs index 5dfca54ea9..eb563a60d5 100644 --- a/src/ffi/error.rs +++ b/src/ffi/error.rs @@ -20,6 +20,8 @@ pub enum hyper_code { /// An optional hyper feature was not enabled. #[cfg_attr(feature = "http2", allow(unused))] HYPERE_FEATURE_NOT_ENABLED, + /// The peer sent an HTTP message that could not be parsed. 
+ HYPERE_INVALID_PEER_MESSAGE, } // ===== impl hyper_error ===== @@ -30,6 +32,7 @@ impl hyper_error { use crate::error::User; match self.0.kind() { + ErrorKind::Parse(_) => hyper_code::HYPERE_INVALID_PEER_MESSAGE, ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF, ErrorKind::User(User::AbortedByCallback) => hyper_code::HYPERE_ABORTED_BY_CALLBACK, // TODO: add more variants From 2c8121f1735aa8efeb0d5e4ef595363c373ba470 Mon Sep 17 00:00:00 2001 From: Ivan Nikulin Date: Tue, 26 Jan 2021 18:36:58 +0000 Subject: [PATCH 020/420] fix(client): HTTP/1 client "Transfer-Encoding" repair code would panic (#2410) Closes #2409 --- src/headers.rs | 6 +++--- tests/client.rs | 30 ++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/headers.rs b/src/headers.rs index 897aa05d01..68c907f9db 100644 --- a/src/headers.rs +++ b/src/headers.rs @@ -117,9 +117,9 @@ pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue // + 2 for ", " let new_cap = line.as_bytes().len() + CHUNKED.len() + 2; let mut buf = BytesMut::with_capacity(new_cap); - buf.copy_from_slice(line.as_bytes()); - buf.copy_from_slice(b", "); - buf.copy_from_slice(CHUNKED.as_bytes()); + buf.extend_from_slice(line.as_bytes()); + buf.extend_from_slice(b", "); + buf.extend_from_slice(CHUNKED.as_bytes()); *line = HeaderValue::from_maybe_shared(buf.freeze()) .expect("original header value plus ascii is valid"); diff --git a/tests/client.rs b/tests/client.rs index 409a1622c0..479500ab3e 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -401,6 +401,36 @@ test! { body: None, } +test! { + name: client_transfer_encoding_repair, + + server: + expected: "\ + GET / HTTP/1.1\r\n\ + transfer-encoding: foo, chunked\r\n\ + host: {addr}\r\n\ + \r\n\ + 5\r\n\ + hello\r\n\ + 0\r\n\r\n\ + ", + reply: REPLY_OK, + + client: + request: { + method: GET, + url: "http://{addr}/", + headers: { + "transfer-encoding" => "foo", + }, + body: "hello", // not Body::empty + }, + response: + status: OK, + headers: {}, + body: None, +} + test! 
{ name: client_get_req_body_chunked_http10, From 9dff00425dc4463a3277cad739dba4d935364851 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 12 Jan 2021 00:11:51 +0100 Subject: [PATCH 021/420] refactor(lib): Use cfg(all(...)) instead of multiple cfg attributes --- src/body/body.rs | 6 ++---- src/client/conn.rs | 8 +++++--- src/client/mod.rs | 3 +-- src/common/exec.rs | 3 +-- src/common/mod.rs | 12 ++++-------- src/headers.rs | 11 ++++------- src/proto/mod.rs | 3 +-- src/server/conn.rs | 24 ++++++++---------------- src/service/mod.rs | 12 ++++-------- 9 files changed, 30 insertions(+), 52 deletions(-) diff --git a/src/body/body.rs b/src/body/body.rs index a515e4b989..1be2c1b0c2 100644 --- a/src/body/body.rs +++ b/src/body/body.rs @@ -16,8 +16,7 @@ use super::DecodedLength; #[cfg(feature = "stream")] use crate::common::sync_wrapper::SyncWrapper; use crate::common::Future; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "client")] +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] use crate::common::Never; use crate::common::{task, watch, Pin, Poll}; #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] @@ -74,8 +73,7 @@ struct Extra { delayed_eof: Option, } -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "client")] +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] type DelayEofUntil = oneshot::Receiver; enum DelayEof { diff --git a/src/client/conn.rs b/src/client/conn.rs index fd25741f0f..2799c61eff 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -51,8 +51,7 @@ use std::fmt; #[cfg(feature = "http2")] use std::marker::PhantomData; use std::sync::Arc; -#[cfg(feature = "runtime")] -#[cfg(feature = "http2")] +#[cfg(all(feature = "runtime", feature = "http2"))] use std::time::Duration; use bytes::Bytes; @@ -63,7 +62,10 @@ use tower_service::Service; use super::dispatch; use crate::body::HttpBody; -use crate::common::{task, exec::{BoxSendFuture, Exec}, Future, Pin, Poll}; +use crate::common::{ + exec::{BoxSendFuture, Exec}, + task, Future, Pin, Poll, +}; use crate::proto; use crate::rt::Executor; #[cfg(feature = "http1")] diff --git a/src/client/mod.rs b/src/client/mod.rs index 9600a7640d..e45608cd4c 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -52,8 +52,7 @@ pub use self::connect::HttpConnector; pub mod connect; -#[cfg(test)] -#[cfg(feature = "runtime")] +#[cfg(all(test, feature = "runtime"))] mod tests; cfg_feature! { diff --git a/src/common/exec.rs b/src/common/exec.rs index c52482a336..f7560efbfc 100644 --- a/src/common/exec.rs +++ b/src/common/exec.rs @@ -5,8 +5,7 @@ use std::sync::Arc; #[cfg(feature = "server")] use crate::body::{Body, HttpBody}; -#[cfg(feature = "http2")] -#[cfg(feature = "server")] +#[cfg(all(feature = "http2", feature = "server"))] use crate::proto::h2::server::H2Stream; use crate::rt::Executor; #[cfg(feature = "server")] diff --git a/src/common/mod.rs b/src/common/mod.rs index a5947315b8..050c61d15c 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -8,17 +8,14 @@ macro_rules! 
ready { } pub(crate) mod buf; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "server")] +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub(crate) mod date; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "server")] +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub(crate) mod drain; #[cfg(any(feature = "http1", feature = "http2"))] pub(crate) mod exec; pub(crate) mod io; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "client")] +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] mod lazy; mod never; #[cfg(feature = "stream")] @@ -26,8 +23,7 @@ pub(crate) mod sync_wrapper; pub(crate) mod task; pub(crate) mod watch; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "client")] +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] pub(crate) use self::lazy::{lazy, Started as Lazy}; #[cfg(any( feature = "client", diff --git a/src/headers.rs b/src/headers.rs index 68c907f9db..7fc486a80c 100644 --- a/src/headers.rs +++ b/src/headers.rs @@ -2,10 +2,9 @@ use bytes::BytesMut; use http::header::CONTENT_LENGTH; use http::header::{HeaderValue, ValueIter}; -#[cfg(feature = "http2")] -#[cfg(feature = "client")] -use http::Method; use http::HeaderMap; +#[cfg(all(feature = "http2", feature = "client"))] +use http::Method; #[cfg(feature = "http1")] pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool { @@ -29,8 +28,7 @@ fn connection_has(value: &HeaderValue, needle: &str) -> bool { false } -#[cfg(feature = "http1")] -#[cfg(feature = "server")] +#[cfg(all(feature = "http1", feature = "server"))] pub(super) fn content_length_parse(value: &HeaderValue) -> Option { value.to_str().ok().and_then(|s| s.parse().ok()) } @@ -66,8 +64,7 @@ pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue> } } -#[cfg(feature = "http2")] -#[cfg(feature = "client")] +#[cfg(all(feature = "http2", feature = "client"))] pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool { match *method { Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false, diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 0c86336b32..6f422078f6 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -37,8 +37,7 @@ pub(crate) type RequestHead = MessageHead; pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri); /// An incoming response message. 
-#[cfg(feature = "http1")] -#[cfg(feature = "client")] +#[cfg(all(feature = "http1", feature = "client"))] pub(crate) type ResponseHead = MessageHead; #[derive(Debug)] diff --git a/src/server/conn.rs b/src/server/conn.rs index 50b84dd0d0..5137708fcb 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -49,8 +49,7 @@ use std::fmt; use std::marker::PhantomData; #[cfg(feature = "tcp")] use std::net::SocketAddr; -#[cfg(feature = "runtime")] -#[cfg(feature = "http2")] +#[cfg(all(feature = "runtime", feature = "http2"))] use std::time::Duration; use bytes::Bytes; @@ -63,8 +62,7 @@ use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; #[cfg(feature = "http2")] use crate::common::io::Rewind; use crate::common::{task, Future, Pin, Poll, Unpin}; -#[cfg(feature = "http1")] -#[cfg(feature = "http2")] +#[cfg(all(feature = "http1", feature = "http2"))] use crate::error::{Kind, Parse}; use crate::proto; use crate::service::{HttpService, MakeServiceRef}; @@ -107,8 +105,7 @@ enum ConnectionMode { #[cfg(feature = "http2")] H2Only, /// Use HTTP/1 and try to upgrade to h2 when a parse error occurs. - #[cfg(feature = "http1")] - #[cfg(feature = "http2")] + #[cfg(all(feature = "http1", feature = "http2"))] Fallback, } @@ -160,8 +157,7 @@ where S: HttpService, { pub(super) conn: Option>, - #[cfg(feature = "http1")] - #[cfg(feature = "http2")] + #[cfg(all(feature = "http1", feature = "http2"))] fallback: Fallback, } @@ -186,16 +182,14 @@ where H2(#[pin] proto::h2::Server, S, B, E>), } -#[cfg(feature = "http1")] -#[cfg(feature = "http2")] +#[cfg(all(feature = "http1", feature = "http2"))] #[derive(Clone, Debug)] enum Fallback { ToHttp2(proto::h2::server::Config, E), Http1Only, } -#[cfg(feature = "http1")] -#[cfg(feature = "http2")] +#[cfg(all(feature = "http1", feature = "http2"))] impl Fallback { fn to_h2(&self) -> bool { match *self { @@ -205,8 +199,7 @@ impl Fallback { } } -#[cfg(feature = "http1")] -#[cfg(feature = "http2")] +#[cfg(all(feature = "http1", feature = "http2"))] impl Unpin for Fallback {} /// Deconstructed parts of a `Connection`. 
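The `cfg(all(...))` changes above and below are purely mechanical: stacking several `#[cfg]` attributes on one item already requires every predicate to hold, so folding them into a single `all(...)` predicate gates exactly the same configurations. For illustration (not hyper code):

    // These two items are compiled under exactly the same feature sets.
    #[cfg(feature = "http1")]
    #[cfg(feature = "server")]
    fn gated_by_stacked_attributes() {}

    #[cfg(all(feature = "http1", feature = "server"))]
    fn gated_by_all_predicate() {}
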
@@ -701,8 +694,7 @@ where }) } - #[cfg(feature = "http1")] - #[cfg(feature = "http2")] + #[cfg(all(feature = "http1", feature = "http2"))] fn upgrade_h2(&mut self) { trace!("Trying to upgrade connection to h2"); let conn = self.conn.take(); diff --git a/src/service/mod.rs b/src/service/mod.rs index 5f156d46cd..22f850ca47 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -39,20 +39,16 @@ pub use tower_service::Service; mod http; mod make; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "client")] +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] mod oneshot; mod util; pub(super) use self::http::HttpService; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "client")] +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] pub(super) use self::make::MakeConnection; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "server")] +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))] pub(super) use self::make::MakeServiceRef; -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg(feature = "client")] +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] pub(super) use self::oneshot::{oneshot, Oneshot}; pub use self::make::make_service_fn; From 43412a950f2052e7865eb596c1d39067b2002a94 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 12 Jan 2021 00:12:21 +0100 Subject: [PATCH 022/420] refactor(lib): Switch from pin-project to pin-project-lite --- Cargo.toml | 2 +- src/client/conn.rs | 49 ++++---- src/client/connect/http.rs | 27 ++--- src/client/pool.rs | 25 +++-- src/common/drain.rs | 19 ++-- src/proto/h1/dispatch.rs | 11 +- src/proto/h2/mod.rs | 21 ++-- src/proto/h2/server.rs | 72 +++++++----- src/server/accept.rs | 14 ++- src/server/conn.rs | 222 +++++++++++++++++++++---------------- src/server/server.rs | 23 ++-- src/server/shutdown.rs | 43 +++---- src/server/tcp.rs | 15 +-- src/service/oneshot.rs | 48 ++++---- 14 files changed, 336 insertions(+), 255 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f5357ed5ba..4a0f84dad1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ httparse = "1.0" h2 = { version = "0.3", optional = true } itoa = "0.4.1" tracing = { version = "0.1", default-features = false, features = ["std"] } -pin-project = "1.0" +pin-project-lite = "0.2.4" tower-service = "0.3" tokio = { version = "1", features = ["sync"] } want = "0.3" diff --git a/src/client/conn.rs b/src/client/conn.rs index 2799c61eff..62cde0c068 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -56,7 +56,7 @@ use std::time::Duration; use bytes::Bytes; use futures_util::future::{self, Either, FutureExt as _}; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use tower_service::Service; @@ -75,15 +75,23 @@ use crate::{Body, Request, Response}; #[cfg(feature = "http1")] type Http1Dispatcher = proto::dispatch::Dispatcher, B, T, R>; -#[pin_project(project = ProtoClientProj)] -enum ProtoClient -where - B: HttpBody, -{ - #[cfg(feature = "http1")] - H1(#[pin] Http1Dispatcher), - #[cfg(feature = "http2")] - H2(#[pin] proto::h2::ClientTask, PhantomData), +pin_project! { + #[project = ProtoClientProj] + enum ProtoClient + where + B: HttpBody, + { + #[cfg(feature = "http1")] + H1 { + #[pin] + h1: Http1Dispatcher, + }, + #[cfg(feature = "http2")] + H2 { + #[pin] + h2: proto::h2::ClientTask, _phantom: PhantomData, + }, + } } /// Returns a handshake future over some IO. 
@@ -400,7 +408,7 @@ where pub fn into_parts(self) -> Parts { match self.inner.expect("already upgraded") { #[cfg(feature = "http1")] - ProtoClient::H1(h1) => { + ProtoClient::H1 { h1 } => { let (io, read_buf, _) = h1.into_inner(); Parts { io, @@ -409,7 +417,7 @@ where } } #[cfg(feature = "http2")] - ProtoClient::H2(..) => { + ProtoClient::H2 { .. } => { panic!("http2 cannot into_inner"); } } @@ -429,9 +437,9 @@ where pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { match *self.inner.as_mut().expect("already upgraded") { #[cfg(feature = "http1")] - ProtoClient::H1(ref mut h1) => h1.poll_without_shutdown(cx), + ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx), #[cfg(feature = "http2")] - ProtoClient::H2(ref mut h2, _) => Pin::new(h2).poll(cx).map_ok(|_| ()), + ProtoClient::H2 { ref mut h2, .. } => Pin::new(h2).poll(cx).map_ok(|_| ()), } } @@ -460,7 +468,7 @@ where proto::Dispatched::Shutdown => Poll::Ready(Ok(())), #[cfg(feature = "http1")] proto::Dispatched::Upgrade(pending) => match self.inner.take() { - Some(ProtoClient::H1(h1)) => { + Some(ProtoClient::H1 { h1 }) => { let (io, buf, _) = h1.into_inner(); pending.fulfill(Upgraded::new(io, buf)); Poll::Ready(Ok(())) @@ -707,14 +715,17 @@ impl Builder { } let cd = proto::h1::dispatch::Client::new(rx); let dispatch = proto::h1::Dispatcher::new(cd, conn); - ProtoClient::H1(dispatch) + ProtoClient::H1 { h1: dispatch } } #[cfg(feature = "http2")] Proto::Http2 => { let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone()) .await?; - ProtoClient::H2(h2, PhantomData) + ProtoClient::H2 { + h2, + _phantom: PhantomData, + } } }; @@ -768,9 +779,9 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project() { #[cfg(feature = "http1")] - ProtoClientProj::H1(c) => c.poll(cx), + ProtoClientProj::H1 { h1 } => h1.poll(cx), #[cfg(feature = "http2")] - ProtoClientProj::H2(c, _) => c.poll(cx), + ProtoClientProj::H2 { h2, .. } => h2.poll(cx), } } } diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs index 734aea188a..17339f4179 100644 --- a/src/client/connect/http.rs +++ b/src/client/connect/http.rs @@ -11,7 +11,7 @@ use std::time::Duration; use futures_util::future::Either; use http::uri::{Scheme, Uri}; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::net::{TcpSocket, TcpStream}; use tokio::time::Sleep; @@ -373,18 +373,19 @@ impl HttpInfo { } } -// Not publicly exported (so missing_docs doesn't trigger). -// -// We return this `Future` instead of the `Pin>` directly -// so that users don't rely on it fitting in a `Pin>` slot -// (and thus we can change the type in the future). -#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[allow(missing_debug_implementations)] -pub struct HttpConnecting { - #[pin] - fut: BoxConnecting, - _marker: PhantomData, +pin_project! { + // Not publicly exported (so missing_docs doesn't trigger). + // + // We return this `Future` instead of the `Pin>` directly + // so that users don't rely on it fitting in a `Pin>` slot + // (and thus we can change the type in the future). 
+ #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct HttpConnecting { + #[pin] + fut: BoxConnecting, + _marker: PhantomData, + } } type ConnectResult = Result; diff --git a/src/client/pool.rs b/src/client/pool.rs index 0f22657bd4..94f73f6afd 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -11,7 +11,7 @@ use futures_channel::oneshot; use tokio::time::{Duration, Instant, Interval}; use super::client::Ver; -use crate::common::{task, exec::Exec, Future, Pin, Poll, Unpin}; +use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin}; // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] @@ -714,16 +714,17 @@ impl Expiration { } #[cfg(feature = "runtime")] -#[pin_project::pin_project] -struct IdleTask { - #[pin] - interval: Interval, - pool: WeakOpt>>, - // This allows the IdleTask to be notified as soon as the entire - // Pool is fully dropped, and shutdown. This channel is never sent on, - // but Err(Canceled) will be received when the Pool is dropped. - #[pin] - pool_drop_notifier: oneshot::Receiver, +pin_project_lite::pin_project! { + struct IdleTask { + #[pin] + interval: Interval, + pool: WeakOpt>>, + // This allows the IdleTask to be notified as soon as the entire + // Pool is fully dropped, and shutdown. This channel is never sent on, + // but Err(Canceled) will be received when the Pool is dropped. + #[pin] + pool_drop_notifier: oneshot::Receiver, + } } #[cfg(feature = "runtime")] @@ -776,7 +777,7 @@ mod tests { use std::time::Duration; use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; - use crate::common::{task, exec::Exec, Future, Pin}; + use crate::common::{exec::Exec, task, Future, Pin}; /// Test unique reservations. #[derive(Debug, PartialEq, Eq)] diff --git a/src/common/drain.rs b/src/common/drain.rs index 4bb2ecc118..174da876df 100644 --- a/src/common/drain.rs +++ b/src/common/drain.rs @@ -1,6 +1,6 @@ use std::mem; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::sync::watch; use super::{task, Future, Pin, Poll}; @@ -21,14 +21,15 @@ pub(crate) struct Watch { rx: watch::Receiver<()>, } -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct Watching { - #[pin] - future: F, - state: State, - watch: Pin + Send + Sync>>, - _rx: watch::Receiver<()>, +pin_project! { + #[allow(missing_debug_implementations)] + pub struct Watching { + #[pin] + future: F, + state: State, + watch: Pin + Send + Sync>>, + _rx: watch::Receiver<()>, + } } enum State { diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index 88e641e9a4..1a72450b15 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -44,10 +44,13 @@ cfg_server! { } cfg_client! { - pub(crate) struct Client { - callback: Option, http::Response>>, - rx: ClientRx, - rx_closed: bool, + pin_project_lite::pin_project! 
{ + pub(crate) struct Client { + callback: Option, http::Response>>, + #[pin] + rx: ClientRx, + rx_closed: bool, + } } type ClientRx = crate::client::dispatch::Receiver, http::Response>; diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index cf06592903..cf78e3f18c 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -5,7 +5,7 @@ use http::header::{ TRANSFER_ENCODING, UPGRADE, }; use http::HeaderMap; -use pin_project::pin_project; +use pin_project_lite::pin_project; use std::error::Error as StdError; use std::io::IoSlice; @@ -94,15 +94,16 @@ fn decode_content_length(headers: &HeaderMap) -> DecodedLength { // body adapters used by both Client and Server -#[pin_project] -struct PipeToSendStream -where - S: HttpBody, -{ - body_tx: SendStream>, - data_done: bool, - #[pin] - stream: S, +pin_project! { + struct PipeToSendStream + where + S: HttpBody, + { + body_tx: SendStream>, + data_done: bool, + #[pin] + stream: S, + } } impl PipeToSendStream diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index eea52e3e4b..167dd90dbb 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -5,7 +5,7 @@ use std::time::Duration; use h2::server::{Connection, Handshake, SendResponse}; use h2::Reason; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::{decode_content_length, ping, PipeToSendStream, SendBuf}; @@ -57,15 +57,16 @@ impl Default for Config { } } -#[pin_project] -pub(crate) struct Server -where - S: HttpService, - B: HttpBody, -{ - exec: E, - service: S, - state: State, +pin_project! { + pub(crate) struct Server + where + S: HttpService, + B: HttpBody, + { + exec: E, + service: S, + state: State, + } } enum State @@ -315,24 +316,33 @@ where } } -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct H2Stream -where - B: HttpBody, -{ - reply: SendResponse>, - #[pin] - state: H2StreamState, +pin_project! { + #[allow(missing_debug_implementations)] + pub struct H2Stream + where + B: HttpBody, + { + reply: SendResponse>, + #[pin] + state: H2StreamState, + } } -#[pin_project(project = H2StreamStateProj)] -enum H2StreamState -where - B: HttpBody, -{ - Service(#[pin] F), - Body(#[pin] PipeToSendStream), +pin_project! 
{ + #[project = H2StreamStateProj] + enum H2StreamState + where + B: HttpBody, + { + Service { + #[pin] + fut: F, + }, + Body { + #[pin] + pipe: PipeToSendStream, + }, + } } impl H2Stream @@ -342,7 +352,7 @@ where fn new(fut: F, respond: SendResponse>) -> H2Stream { H2Stream { reply: respond, - state: H2StreamState::Service(fut), + state: H2StreamState::Service { fut }, } } } @@ -371,7 +381,7 @@ where let mut me = self.project(); loop { let next = match me.state.as_mut().project() { - H2StreamStateProj::Service(h) => { + H2StreamStateProj::Service { fut: h } => { let res = match h.poll(cx) { Poll::Ready(Ok(r)) => r, Poll::Pending => { @@ -409,13 +419,15 @@ where if !body.is_end_stream() { let body_tx = reply!(me, res, false); - H2StreamState::Body(PipeToSendStream::new(body, body_tx)) + H2StreamState::Body { + pipe: PipeToSendStream::new(body, body_tx), + } } else { reply!(me, res, true); return Poll::Ready(Ok(())); } } - H2StreamStateProj::Body(pipe) => { + H2StreamStateProj::Body { pipe } => { return pipe.poll(cx); } }; diff --git a/src/server/accept.rs b/src/server/accept.rs index 4ec287129d..4b7a1487dd 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -9,7 +9,7 @@ #[cfg(feature = "stream")] use futures_core::Stream; #[cfg(feature = "stream")] -use pin_project::pin_project; +use pin_project_lite::pin_project; use crate::common::{ task::{self, Poll}, @@ -86,8 +86,12 @@ pub fn from_stream(stream: S) -> impl Accept where S: Stream>, { - #[pin_project] - struct FromStream(#[pin] S); + pin_project! { + struct FromStream { + #[pin] + stream: S, + } + } impl Accept for FromStream where @@ -99,9 +103,9 @@ where self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll>> { - self.project().0.poll_next(cx) + self.project().stream.poll_next(cx) } } - FromStream(stream) + FromStream { stream } } diff --git a/src/server/conn.rs b/src/server/conn.rs index 5137708fcb..112bbe535d 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -45,7 +45,6 @@ use std::error::Error as StdError; use std::fmt; -#[cfg(feature = "http1")] use std::marker::PhantomData; #[cfg(feature = "tcp")] use std::net::SocketAddr; @@ -53,7 +52,7 @@ use std::net::SocketAddr; use std::time::Duration; use bytes::Bytes; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::accept::Accept; @@ -109,77 +108,85 @@ enum ConnectionMode { Fallback, } -/// A stream mapping incoming IOs to new services. -/// -/// Yields `Connecting`s that are futures that should be put on a reactor. -#[must_use = "streams do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub(super) struct Serve { - #[pin] - incoming: I, - make_service: S, - protocol: Http, +pin_project! { + /// A stream mapping incoming IOs to new services. + /// + /// Yields `Connecting`s that are futures that should be put on a reactor. + #[must_use = "streams do nothing unless polled"] + #[derive(Debug)] + pub(super) struct Serve { + #[pin] + incoming: I, + make_service: S, + protocol: Http, + } } -/// A future building a new `Service` to a `Connection`. -/// -/// Wraps the future returned from `MakeService` into one that returns -/// a `Connection`. -#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub struct Connecting { - #[pin] - future: F, - io: Option, - protocol: Http, +pin_project! { + /// A future building a new `Service` to a `Connection`. + /// + /// Wraps the future returned from `MakeService` into one that returns + /// a `Connection`. 
+ #[must_use = "futures do nothing unless polled"] + #[derive(Debug)] + pub struct Connecting { + #[pin] + future: F, + io: Option, + protocol: Http, + } } -#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub(super) struct SpawnAll { - // TODO: re-add `pub(super)` once rustdoc can handle this. - // - // See https://github.com/rust-lang/rust/issues/64705 - #[pin] - pub(super) serve: Serve, +pin_project! { + #[must_use = "futures do nothing unless polled"] + #[derive(Debug)] + pub(super) struct SpawnAll { + // TODO: re-add `pub(super)` once rustdoc can handle this. + // + // See https://github.com/rust-lang/rust/issues/64705 + #[pin] + pub(super) serve: Serve, + } } -/// A future binding a connection with a Service. -/// -/// Polling this future will drive HTTP forward. -#[must_use = "futures do nothing unless polled"] -#[pin_project] -pub struct Connection -where - S: HttpService, -{ - pub(super) conn: Option>, - #[cfg(all(feature = "http1", feature = "http2"))] - fallback: Fallback, +pin_project! { + /// A future binding a connection with a Service. + /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + pub struct Connection + where + S: HttpService, + { + pub(super) conn: Option>, + fallback: Fallback, + } } -#[pin_project(project = ProtoServerProj)] -pub(super) enum ProtoServer -where - S: HttpService, - B: HttpBody, -{ - #[cfg(feature = "http1")] - H1( - #[pin] - proto::h1::Dispatcher< - proto::h1::dispatch::Server, - B, - T, - proto::ServerTransaction, - >, - PhantomData, - ), - #[cfg(feature = "http2")] - H2(#[pin] proto::h2::Server, S, B, E>), +pin_project! { + #[project = ProtoServerProj] + pub(super) enum ProtoServer + where + S: HttpService, + B: HttpBody, + { + #[cfg(feature = "http1")] + H1 { + #[pin] + h1: proto::h1::Dispatcher< + proto::h1::dispatch::Server, + B, + T, + proto::ServerTransaction, + >, + _phantom: PhantomData, + }, + #[cfg(feature = "http2")] + H2 { + #[pin] + h2: proto::h2::Server, S, B, E>, + }, + } } #[cfg(all(feature = "http1", feature = "http2"))] @@ -189,6 +196,10 @@ enum Fallback { Http1Only, } +#[cfg(not(all(feature = "http1", feature = "http2")))] +#[derive(Clone, Debug)] +struct Fallback(PhantomData); + #[cfg(all(feature = "http1", feature = "http2"))] impl Fallback { fn to_h2(&self) -> bool { @@ -519,7 +530,10 @@ impl Http { conn.set_max_buf_size(max); } let sd = proto::h1::dispatch::Server::new(service); - ProtoServer::H1(proto::h1::Dispatcher::new(sd, conn), PhantomData) + ProtoServer::H1 { + h1: proto::h1::Dispatcher::new(sd, conn), + _phantom: PhantomData, + } }}; } @@ -535,7 +549,7 @@ impl Http { let rewind_io = Rewind::new(io); let h2 = proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone()); - ProtoServer::H2(h2) + ProtoServer::H2 { h2 } } }; @@ -590,14 +604,14 @@ where /// This should only be called while the `Connection` future is still /// pending. If called after `Connection::poll` has resolved, this does /// nothing. - pub fn graceful_shutdown(self: Pin<&mut Self>) { - match self.project().conn { + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + match self.conn { #[cfg(feature = "http1")] - Some(ProtoServer::H1(ref mut h1, _)) => { + Some(ProtoServer::H1 { ref mut h1, .. 
}) => { h1.disable_keep_alive(); } #[cfg(feature = "http2")] - Some(ProtoServer::H2(ref mut h2)) => { + Some(ProtoServer::H2 { ref mut h2 }) => { h2.graceful_shutdown(); } None => (), @@ -624,7 +638,7 @@ where pub fn try_into_parts(self) -> Option> { match self.conn.unwrap() { #[cfg(feature = "http1")] - ProtoServer::H1(h1, _) => { + ProtoServer::H1 { h1, .. } => { let (io, read_buf, dispatch) = h1.into_inner(); Some(Parts { io, @@ -634,7 +648,7 @@ where }) } #[cfg(feature = "http2")] - ProtoServer::H2(_h2) => None, + ProtoServer::H2 { .. } => None, } } @@ -658,7 +672,7 @@ where loop { match *self.conn.as_mut().unwrap() { #[cfg(feature = "http1")] - ProtoServer::H1(ref mut h1, _) => match ready!(h1.poll_without_shutdown(cx)) { + ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) { Ok(()) => return Poll::Ready(Ok(())), Err(e) => { #[cfg(feature = "http2")] @@ -674,7 +688,7 @@ where } }, #[cfg(feature = "http2")] - ProtoServer::H2(ref mut h2) => return Pin::new(h2).poll(cx).map_ok(|_| ()), + ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()), }; } } @@ -700,8 +714,8 @@ where let conn = self.conn.take(); let (io, read_buf, dispatch) = match conn.unwrap() { - ProtoServer::H1(h1, _) => h1.into_inner(), - ProtoServer::H2(_h2) => { + ProtoServer::H1 { h1, .. } => h1.into_inner(), + ProtoServer::H2 { .. } => { panic!("h2 cannot into_inner"); } }; @@ -714,7 +728,7 @@ where let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone()); debug_assert!(self.conn.is_none()); - self.conn = Some(ProtoServer::H2(h2)); + self.conn = Some(ProtoServer::H2 { h2 }); } /// Enable this connection to support higher-level HTTP upgrades. @@ -948,9 +962,9 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project() { #[cfg(feature = "http1")] - ProtoServerProj::H1(s, _) => s.poll(cx), + ProtoServerProj::H1 { h1, .. } => h1.poll(cx), #[cfg(feature = "http2")] - ProtoServerProj::H2(s) => s.poll(cx), + ProtoServerProj::H2 { h2 } => h2.poll(cx), } } } @@ -964,7 +978,7 @@ pub(crate) mod spawn_all { use crate::common::exec::ConnStreamExec; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::HttpService; - use pin_project::pin_project; + use pin_project_lite::pin_project; // Used by `SpawnAll` to optionally watch a `Connection` future. // @@ -1009,23 +1023,36 @@ pub(crate) mod spawn_all { // Users cannot import this type, nor the associated `NewSvcExec`. Instead, // a blanket implementation for `Executor` is sufficient. - #[pin_project] - #[allow(missing_debug_implementations)] - pub struct NewSvcTask, E, W: Watcher> { - #[pin] - state: State, + pin_project! { + #[allow(missing_debug_implementations)] + pub struct NewSvcTask, E, W: Watcher> { + #[pin] + state: State, + } } - #[pin_project(project = StateProj)] - pub(super) enum State, E, W: Watcher> { - Connecting(#[pin] Connecting, W), - Connected(#[pin] W::Future), + pin_project! 
{ + #[project = StateProj] + pub(super) enum State, E, W: Watcher> { + Connecting { + #[pin] + connecting: Connecting, + watcher: W, + }, + Connected { + #[pin] + future: W::Future, + }, + } } impl, E, W: Watcher> NewSvcTask { pub(super) fn new(connecting: Connecting, watcher: W) -> Self { NewSvcTask { - state: State::Connecting(connecting, watcher), + state: State::Connecting { + connecting, + watcher, + }, } } } @@ -1052,7 +1079,10 @@ pub(crate) mod spawn_all { loop { let next = { match me.state.as_mut().project() { - StateProj::Connecting(connecting, watcher) => { + StateProj::Connecting { + connecting, + watcher, + } => { let res = ready!(connecting.poll(cx)); let conn = match res { Ok(conn) => conn, @@ -1062,10 +1092,10 @@ pub(crate) mod spawn_all { return Poll::Ready(()); } }; - let connected = watcher.watch(conn.with_upgrades()); - State::Connected(connected) + let future = watcher.watch(conn.with_upgrades()); + State::Connected { future } } - StateProj::Connected(future) => { + StateProj::Connected { future } => { return future.poll(cx).map(|res| { if let Err(err) = res { debug!("connection error: {}", err); @@ -1133,7 +1163,7 @@ mod upgrades { #[cfg(feature = "http1")] Ok(proto::Dispatched::Upgrade(pending)) => { match self.inner.conn.take() { - Some(ProtoServer::H1(h1, _)) => { + Some(ProtoServer::H1 { h1, .. }) => { let (io, buf, _) = h1.into_inner(); pending.fulfill(Upgraded::new(io, buf)); return Poll::Ready(Ok(())); diff --git a/src/server/server.rs b/src/server/server.rs index 48cc6e2803..07d9e5fbb0 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -6,7 +6,7 @@ use std::net::{SocketAddr, TcpListener as StdTcpListener}; #[cfg(feature = "tcp")] use std::time::Duration; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::accept::Accept; @@ -21,16 +21,17 @@ use super::shutdown::{Graceful, GracefulWatcher}; #[cfg(feature = "tcp")] use super::tcp::AddrIncoming; -/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. -/// -/// `Server` is a `Future` mapping a bound listener with a set of service -/// handlers. It is built using the [`Builder`](Builder), and the future -/// completes when the server has been shutdown. It should be run by an -/// `Executor`. -#[pin_project] -pub struct Server { - #[pin] - spawn_all: SpawnAll, +pin_project! { + /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. + /// + /// `Server` is a `Future` mapping a bound listener with a set of service + /// handlers. It is built using the [`Builder`](Builder), and the future + /// completes when the server has been shutdown. It should be run by an + /// `Executor`. + pub struct Server { + #[pin] + spawn_all: SpawnAll, + } } /// A builder for a [`Server`](Server). 
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs index e54ba42104..122853ac17 100644 --- a/src/server/shutdown.rs +++ b/src/server/shutdown.rs @@ -1,33 +1,36 @@ use std::error::Error as StdError; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; -use super::conn::{SpawnAll, UpgradeableConnection, Watcher}; use super::accept::Accept; +use super::conn::{SpawnAll, UpgradeableConnection, Watcher}; use crate::body::{Body, HttpBody}; use crate::common::drain::{self, Draining, Signal, Watch, Watching}; use crate::common::exec::{ConnStreamExec, NewSvcExec}; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::{HttpService, MakeServiceRef}; -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct Graceful { - #[pin] - state: State, +pin_project! { + #[allow(missing_debug_implementations)] + pub struct Graceful { + #[pin] + state: State, + } } -#[pin_project(project = StateProj)] -pub(super) enum State { - Running { - drain: Option<(Signal, Watch)>, - #[pin] - spawn_all: SpawnAll, - #[pin] - signal: F, - }, - Draining(Draining), +pin_project! { + #[project = StateProj] + pub(super) enum State { + Running { + drain: Option<(Signal, Watch)>, + #[pin] + spawn_all: SpawnAll, + #[pin] + signal: F, + }, + Draining { draining: Draining }, + } } impl Graceful { @@ -71,14 +74,16 @@ where Poll::Ready(()) => { debug!("signal received, starting graceful shutdown"); let sig = drain.take().expect("drain channel").0; - State::Draining(sig.drain()) + State::Draining { + draining: sig.drain(), + } } Poll::Pending => { let watch = drain.as_ref().expect("drain channel").1.clone(); return spawn_all.poll_watch(cx, &GracefulWatcher(watch)); } }, - StateProj::Draining(ref mut draining) => { + StateProj::Draining { ref mut draining } => { return Pin::new(draining).poll(cx).map(Ok); } } diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 91afc40120..46c570decd 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -229,13 +229,14 @@ mod addr_stream { use crate::common::{task, Pin, Poll}; - /// A transport returned yieled by `AddrIncoming`. - #[pin_project::pin_project] - #[derive(Debug)] - pub struct AddrStream { - #[pin] - inner: TcpStream, - pub(super) remote_addr: SocketAddr, + pin_project_lite::pin_project! { + /// A transport returned yieled by `AddrIncoming`. + #[derive(Debug)] + pub struct AddrStream { + #[pin] + inner: TcpStream, + pub(super) remote_addr: SocketAddr, + } } impl AddrStream { diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs index 766d0c4689..2697af8f4c 100644 --- a/src/service/oneshot.rs +++ b/src/service/oneshot.rs @@ -1,6 +1,6 @@ // TODO: Eventually to be replaced with tower_util::Oneshot. -use pin_project::pin_project; +use pin_project_lite::pin_project; use tower_service::Service; use crate::common::{task, Future, Pin, Poll}; @@ -10,25 +10,35 @@ where S: Service, { Oneshot { - state: State::NotReady(svc, req), + state: State::NotReady { svc, req }, } } -// A `Future` consuming a `Service` and request, waiting until the `Service` -// is ready, and then calling `Service::call` with the request, and -// waiting for that `Future`. -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct Oneshot, Req> { - #[pin] - state: State, +pin_project! { + // A `Future` consuming a `Service` and request, waiting until the `Service` + // is ready, and then calling `Service::call` with the request, and + // waiting for that `Future`. 
+ #[allow(missing_debug_implementations)] + pub struct Oneshot, Req> { + #[pin] + state: State, + } } -#[pin_project(project = StateProj, project_replace = StateProjOwn)] -enum State, Req> { - NotReady(S, Req), - Called(#[pin] S::Future), - Tmp, +pin_project! { + #[project = StateProj] + #[project_replace = StateProjOwn] + enum State, Req> { + NotReady { + svc: S, + req: Req, + }, + Called { + #[pin] + fut: S::Future, + }, + Tmp, + } } impl Future for Oneshot @@ -42,19 +52,19 @@ where loop { match me.state.as_mut().project() { - StateProj::NotReady(ref mut svc, _) => { + StateProj::NotReady { ref mut svc, .. } => { ready!(svc.poll_ready(cx))?; // fallthrough out of the match's borrow } - StateProj::Called(fut) => { + StateProj::Called { fut } => { return fut.poll(cx); } StateProj::Tmp => unreachable!(), } match me.state.as_mut().project_replace(State::Tmp) { - StateProjOwn::NotReady(mut svc, req) => { - me.state.set(State::Called(svc.call(req))); + StateProjOwn::NotReady { mut svc, req } => { + me.state.set(State::Called { fut: svc.call(req) }); } _ => unreachable!(), } From 5e8238c1b8d6351d42546a4423cf5598def1c35e Mon Sep 17 00:00:00 2001 From: Michal 'vorner' Vaner Date: Tue, 2 Feb 2021 18:34:40 +0100 Subject: [PATCH 023/420] docs(body): warn about no length check in aggregate (#2415) The to_bytes and aggregate don't check how long the body is, so the user better be aware. Relates to #2414. --- src/body/aggregate.rs | 6 ++++++ src/body/to_bytes.rs | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/src/body/aggregate.rs b/src/body/aggregate.rs index 97b6c2d91f..99662419d3 100644 --- a/src/body/aggregate.rs +++ b/src/body/aggregate.rs @@ -7,6 +7,12 @@ use crate::common::buf::BufList; /// /// The returned `impl Buf` groups the `Buf`s from the `HttpBody` without /// copying them. This is ideal if you don't require a contiguous buffer. +/// +/// # Note +/// +/// Care needs to be taken if the remote is untrusted. The function doesn't implement any length +/// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the +/// `Content-Length` is a possibility, but it is not strictly mandated to be present. pub async fn aggregate(body: T) -> Result where T: HttpBody, diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs index 7c0765f486..3ec7a7654b 100644 --- a/src/body/to_bytes.rs +++ b/src/body/to_bytes.rs @@ -7,6 +7,12 @@ use super::HttpBody; /// This may require copying the data into a single buffer. If you don't need /// a contiguous buffer, prefer the [`aggregate`](crate::body::aggregate()) /// function. +/// +/// # Note +/// +/// Care needs to be taken if the remote is untrusted. The function doesn't implement any length +/// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the +/// `Content-Length` is a possibility, but it is not strictly mandated to be present. pub async fn to_bytes(body: T) -> Result where T: HttpBody, From 4d2125c67c8087de863f74278a017c4caf37e6a9 Mon Sep 17 00:00:00 2001 From: Michal 'vorner' Vaner Date: Wed, 3 Feb 2021 02:09:06 +0100 Subject: [PATCH 024/420] perf(body): specialize BufList::copy_to_bytes (#2413) Some implementations of the Buf trait have an optimized version (for example Bytes) of copy_to_bytes, opportunistically use that one. 
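
As a rough illustration (a sketch only, assuming nothing beyond the public `bytes`
crate API, and not part of the change itself): `Bytes` implements
`Buf::copy_to_bytes` as a cheap split of its existing allocation, so when the
front buffer of the `BufList` can satisfy the whole request, delegating to it
skips the byte copy that the generic fallback path would perform.

```rust
use bytes::{Buf, Bytes};

fn main() {
    let mut front = Bytes::from_static(b"hello world");
    // Specialized for `Bytes`: splits the existing buffer, no byte copy.
    let head = front.copy_to_bytes(5);
    assert_eq!(head, "hello");
    assert_eq!(front, " world");
}
```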
--- src/common/buf.rs | 80 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 1 deletion(-) diff --git a/src/common/buf.rs b/src/common/buf.rs index 9c8feae617..64e9333ead 100644 --- a/src/common/buf.rs +++ b/src/common/buf.rs @@ -1,7 +1,7 @@ use std::collections::VecDeque; use std::io::IoSlice; -use bytes::Buf; +use bytes::{Buf, BufMut, Bytes, BytesMut}; pub(crate) struct BufList { bufs: VecDeque, @@ -70,4 +70,82 @@ impl Buf for BufList { } vecs } + + #[inline] + fn copy_to_bytes(&mut self, len: usize) -> Bytes { + // Our inner buffer may have an optimized version of copy_to_bytes, and if the whole + // request can be fulfilled by the front buffer, we can take advantage. + match self.bufs.front_mut() { + Some(front) if front.remaining() == len => { + let b = front.copy_to_bytes(len); + self.bufs.pop_front(); + b + } + Some(front) if front.remaining() > len => front.copy_to_bytes(len), + _ => { + assert!(len <= self.remaining(), "`len` greater than remaining"); + let mut bm = BytesMut::with_capacity(len); + bm.put(self.take(len)); + bm.freeze() + } + } + } +} + +#[cfg(test)] +mod tests { + use std::ptr; + + use super::*; + + fn hello_world_buf() -> BufList { + BufList { + bufs: vec![Bytes::from("Hello"), Bytes::from(" "), Bytes::from("World")].into(), + } + } + + #[test] + fn to_bytes_shorter() { + let mut bufs = hello_world_buf(); + let old_ptr = bufs.chunk().as_ptr(); + let start = bufs.copy_to_bytes(4); + assert_eq!(start, "Hell"); + assert!(ptr::eq(old_ptr, start.as_ptr())); + assert_eq!(bufs.chunk(), b"o"); + assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr())); + assert_eq!(bufs.remaining(), 7); + } + + #[test] + fn to_bytes_eq() { + let mut bufs = hello_world_buf(); + let old_ptr = bufs.chunk().as_ptr(); + let start = bufs.copy_to_bytes(5); + assert_eq!(start, "Hello"); + assert!(ptr::eq(old_ptr, start.as_ptr())); + assert_eq!(bufs.chunk(), b" "); + assert_eq!(bufs.remaining(), 6); + } + + #[test] + fn to_bytes_longer() { + let mut bufs = hello_world_buf(); + let start = bufs.copy_to_bytes(7); + assert_eq!(start, "Hello W"); + assert_eq!(bufs.remaining(), 4); + } + + #[test] + fn one_long_buf_to_bytes() { + let mut buf = BufList::new(); + buf.push(b"Hello World" as &[_]); + assert_eq!(buf.copy_to_bytes(5), "Hello"); + assert_eq!(buf.chunk(), b" World"); + } + + #[test] + #[should_panic(expected = "`len` greater than remaining")] + fn buf_to_bytes_too_many() { + hello_world_buf().copy_to_bytes(42); + } } From 8f93123efef5c1361086688fe4f34c83c89cec02 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 5 Feb 2021 13:27:30 -0800 Subject: [PATCH 025/420] fix(http1): fix server misinterpretting multiple Transfer-Encoding headers When a request arrived with multiple `Transfer-Encoding` headers, hyper would check each if they ended with `chunked`. It should have only checked if the *last* header ended with `chunked`. 
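
A minimal sketch of the intended rule (simplified and purely illustrative; the
helper name and the pre-split header values are hypothetical, and the real check
in `src/proto/h1/role.rs` below rejects the whole request when it does not hold):

```rust
// Only the encoding listed last may mark the body as chunked.
fn last_te_is_chunked(values: &[&str]) -> bool {
    values
        .last()
        .map(|v| v.trim().ends_with("chunked"))
        .unwrap_or(false)
}

fn main() {
    assert!(last_te_is_chunked(&["gzip", "chunked"]));
    // `chunked` followed by another coding must not be treated as chunked.
    assert!(!last_te_is_chunked(&["chunked", "afterlol"]));
}
```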
See https://github.com/hyperium/hyper/security/advisories/GHSA-6hfq-h8hq-87mf --- src/proto/h1/role.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 0c7eb1eecd..a9f2f0074f 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -213,6 +213,8 @@ impl Http1Transaction for Server { if headers::is_chunked_(&value) { is_te_chunked = true; decoder = DecodedLength::CHUNKED; + } else { + is_te_chunked = false; } } header::CONTENT_LENGTH => { @@ -1444,6 +1446,16 @@ mod tests { "transfer-encoding doesn't end in chunked", ); + parse_err( + "\ + POST / HTTP/1.1\r\n\ + transfer-encoding: chunked\r\n\ + transfer-encoding: afterlol\r\n\ + \r\n\ + ", + "transfer-encoding multiple lines doesn't end in chunked", + ); + // http/1.0 assert_eq!( From 48d4594930da4e227039cfa254411b85c98b63c5 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 5 Feb 2021 14:11:35 -0800 Subject: [PATCH 026/420] v0.14.3 --- CHANGELOG.md | 19 +++++++++++++++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74306dd2d8..bcd5154f96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +### v0.14.3 (2021-02-05) + + +#### Bug Fixes + +* **client:** HTTP/1 client "Transfer-Encoding" repair code would panic (#2410) ([2c8121f1](https://github.com/hyperium/hyper/commit/2c8121f1735aa8efeb0d5e4ef595363c373ba470), closes [#2409](https://github.com/hyperium/hyper/issues/2409)) +* **http1:** fix server misinterpretting multiple Transfer-Encoding headers ([8f93123e](https://github.com/hyperium/hyper/commit/8f93123efef5c1361086688fe4f34c83c89cec02)) + + +#### Features + +* **body:** + * reexport `hyper::body::SizeHint` (#2404) ([9956587f](https://github.com/hyperium/hyper/commit/9956587f83428a5dbe338ba0b55c1dc0bce8c282)) + * add `send_trailers` to Body channel's `Sender` (#2387) ([bf8d74ad](https://github.com/hyperium/hyper/commit/bf8d74ad1cf7d0b33b470b1e61625ebac56f9c4c), closes [#2260](https://github.com/hyperium/hyper/issues/2260)) +* **ffi:** + * add HYPERE_INVALID_PEER_MESSAGE error code for parse errors ([1928682b](https://github.com/hyperium/hyper/commit/1928682b33f98244435ba6d574677546205a15ec)) + * Initial C API for hyper ([3ae1581a](https://github.com/hyperium/hyper/commit/3ae1581a539b67363bd87d9d8fc8635a204eec5d)) + + ### v0.14.2 (2020-12-29) diff --git a/Cargo.toml b/Cargo.toml index 4a0f84dad1..11357ca3c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.2" # don't forget to update html_root_url +version = "0.14.3" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" diff --git a/src/lib.rs b/src/lib.rs index 8b16e31293..7029c517a3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,4 @@ -#![doc(html_root_url = "https://docs.rs/hyper/0.14.2")] +#![doc(html_root_url = "https://docs.rs/hyper/0.14.3")] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] From 7390f026d79df930cafb6d38845a38d3d6a09c1d Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 5 Feb 2021 15:37:21 -0800 Subject: [PATCH 027/420] Revert "refactor(lib): Switch from pin-project to pin-project-lite" This reverts commit 43412a950f2052e7865eb596c1d39067b2002a94. 
--- Cargo.toml | 2 +- src/client/conn.rs | 49 ++++---- src/client/connect/http.rs | 27 +++-- src/client/pool.rs | 25 ++--- src/common/drain.rs | 19 ++-- src/proto/h1/dispatch.rs | 11 +- src/proto/h2/mod.rs | 21 ++-- src/proto/h2/server.rs | 72 +++++------- src/server/accept.rs | 14 +-- src/server/conn.rs | 222 ++++++++++++++++--------------------- src/server/server.rs | 23 ++-- src/server/shutdown.rs | 43 ++++--- src/server/tcp.rs | 15 ++- src/service/oneshot.rs | 48 ++++---- 14 files changed, 255 insertions(+), 336 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 11357ca3c8..9617ff7c23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ httparse = "1.0" h2 = { version = "0.3", optional = true } itoa = "0.4.1" tracing = { version = "0.1", default-features = false, features = ["std"] } -pin-project-lite = "0.2.4" +pin-project = "1.0" tower-service = "0.3" tokio = { version = "1", features = ["sync"] } want = "0.3" diff --git a/src/client/conn.rs b/src/client/conn.rs index 62cde0c068..2799c61eff 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -56,7 +56,7 @@ use std::time::Duration; use bytes::Bytes; use futures_util::future::{self, Either, FutureExt as _}; -use pin_project_lite::pin_project; +use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use tower_service::Service; @@ -75,23 +75,15 @@ use crate::{Body, Request, Response}; #[cfg(feature = "http1")] type Http1Dispatcher = proto::dispatch::Dispatcher, B, T, R>; -pin_project! { - #[project = ProtoClientProj] - enum ProtoClient - where - B: HttpBody, - { - #[cfg(feature = "http1")] - H1 { - #[pin] - h1: Http1Dispatcher, - }, - #[cfg(feature = "http2")] - H2 { - #[pin] - h2: proto::h2::ClientTask, _phantom: PhantomData, - }, - } +#[pin_project(project = ProtoClientProj)] +enum ProtoClient +where + B: HttpBody, +{ + #[cfg(feature = "http1")] + H1(#[pin] Http1Dispatcher), + #[cfg(feature = "http2")] + H2(#[pin] proto::h2::ClientTask, PhantomData), } /// Returns a handshake future over some IO. @@ -408,7 +400,7 @@ where pub fn into_parts(self) -> Parts { match self.inner.expect("already upgraded") { #[cfg(feature = "http1")] - ProtoClient::H1 { h1 } => { + ProtoClient::H1(h1) => { let (io, read_buf, _) = h1.into_inner(); Parts { io, @@ -417,7 +409,7 @@ where } } #[cfg(feature = "http2")] - ProtoClient::H2 { .. } => { + ProtoClient::H2(..) => { panic!("http2 cannot into_inner"); } } @@ -437,9 +429,9 @@ where pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { match *self.inner.as_mut().expect("already upgraded") { #[cfg(feature = "http1")] - ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx), + ProtoClient::H1(ref mut h1) => h1.poll_without_shutdown(cx), #[cfg(feature = "http2")] - ProtoClient::H2 { ref mut h2, .. 
} => Pin::new(h2).poll(cx).map_ok(|_| ()), + ProtoClient::H2(ref mut h2, _) => Pin::new(h2).poll(cx).map_ok(|_| ()), } } @@ -468,7 +460,7 @@ where proto::Dispatched::Shutdown => Poll::Ready(Ok(())), #[cfg(feature = "http1")] proto::Dispatched::Upgrade(pending) => match self.inner.take() { - Some(ProtoClient::H1 { h1 }) => { + Some(ProtoClient::H1(h1)) => { let (io, buf, _) = h1.into_inner(); pending.fulfill(Upgraded::new(io, buf)); Poll::Ready(Ok(())) @@ -715,17 +707,14 @@ impl Builder { } let cd = proto::h1::dispatch::Client::new(rx); let dispatch = proto::h1::Dispatcher::new(cd, conn); - ProtoClient::H1 { h1: dispatch } + ProtoClient::H1(dispatch) } #[cfg(feature = "http2")] Proto::Http2 => { let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone()) .await?; - ProtoClient::H2 { - h2, - _phantom: PhantomData, - } + ProtoClient::H2(h2, PhantomData) } }; @@ -779,9 +768,9 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project() { #[cfg(feature = "http1")] - ProtoClientProj::H1 { h1 } => h1.poll(cx), + ProtoClientProj::H1(c) => c.poll(cx), #[cfg(feature = "http2")] - ProtoClientProj::H2 { h2, .. } => h2.poll(cx), + ProtoClientProj::H2(c, _) => c.poll(cx), } } } diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs index 17339f4179..734aea188a 100644 --- a/src/client/connect/http.rs +++ b/src/client/connect/http.rs @@ -11,7 +11,7 @@ use std::time::Duration; use futures_util::future::Either; use http::uri::{Scheme, Uri}; -use pin_project_lite::pin_project; +use pin_project::pin_project; use tokio::net::{TcpSocket, TcpStream}; use tokio::time::Sleep; @@ -373,19 +373,18 @@ impl HttpInfo { } } -pin_project! { - // Not publicly exported (so missing_docs doesn't trigger). - // - // We return this `Future` instead of the `Pin>` directly - // so that users don't rely on it fitting in a `Pin>` slot - // (and thus we can change the type in the future). - #[must_use = "futures do nothing unless polled"] - #[allow(missing_debug_implementations)] - pub struct HttpConnecting { - #[pin] - fut: BoxConnecting, - _marker: PhantomData, - } +// Not publicly exported (so missing_docs doesn't trigger). +// +// We return this `Future` instead of the `Pin>` directly +// so that users don't rely on it fitting in a `Pin>` slot +// (and thus we can change the type in the future). +#[must_use = "futures do nothing unless polled"] +#[pin_project] +#[allow(missing_debug_implementations)] +pub struct HttpConnecting { + #[pin] + fut: BoxConnecting, + _marker: PhantomData, } type ConnectResult = Result; diff --git a/src/client/pool.rs b/src/client/pool.rs index 94f73f6afd..0f22657bd4 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -11,7 +11,7 @@ use futures_channel::oneshot; use tokio::time::{Duration, Instant, Interval}; use super::client::Ver; -use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin}; +use crate::common::{task, exec::Exec, Future, Pin, Poll, Unpin}; // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] @@ -714,17 +714,16 @@ impl Expiration { } #[cfg(feature = "runtime")] -pin_project_lite::pin_project! { - struct IdleTask { - #[pin] - interval: Interval, - pool: WeakOpt>>, - // This allows the IdleTask to be notified as soon as the entire - // Pool is fully dropped, and shutdown. This channel is never sent on, - // but Err(Canceled) will be received when the Pool is dropped. 
- #[pin] - pool_drop_notifier: oneshot::Receiver, - } +#[pin_project::pin_project] +struct IdleTask { + #[pin] + interval: Interval, + pool: WeakOpt>>, + // This allows the IdleTask to be notified as soon as the entire + // Pool is fully dropped, and shutdown. This channel is never sent on, + // but Err(Canceled) will be received when the Pool is dropped. + #[pin] + pool_drop_notifier: oneshot::Receiver, } #[cfg(feature = "runtime")] @@ -777,7 +776,7 @@ mod tests { use std::time::Duration; use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; - use crate::common::{exec::Exec, task, Future, Pin}; + use crate::common::{task, exec::Exec, Future, Pin}; /// Test unique reservations. #[derive(Debug, PartialEq, Eq)] diff --git a/src/common/drain.rs b/src/common/drain.rs index 174da876df..4bb2ecc118 100644 --- a/src/common/drain.rs +++ b/src/common/drain.rs @@ -1,6 +1,6 @@ use std::mem; -use pin_project_lite::pin_project; +use pin_project::pin_project; use tokio::sync::watch; use super::{task, Future, Pin, Poll}; @@ -21,15 +21,14 @@ pub(crate) struct Watch { rx: watch::Receiver<()>, } -pin_project! { - #[allow(missing_debug_implementations)] - pub struct Watching { - #[pin] - future: F, - state: State, - watch: Pin + Send + Sync>>, - _rx: watch::Receiver<()>, - } +#[allow(missing_debug_implementations)] +#[pin_project] +pub struct Watching { + #[pin] + future: F, + state: State, + watch: Pin + Send + Sync>>, + _rx: watch::Receiver<()>, } enum State { diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index 1a72450b15..88e641e9a4 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -44,13 +44,10 @@ cfg_server! { } cfg_client! { - pin_project_lite::pin_project! { - pub(crate) struct Client { - callback: Option, http::Response>>, - #[pin] - rx: ClientRx, - rx_closed: bool, - } + pub(crate) struct Client { + callback: Option, http::Response>>, + rx: ClientRx, + rx_closed: bool, } type ClientRx = crate::client::dispatch::Receiver, http::Response>; diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index cf78e3f18c..cf06592903 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -5,7 +5,7 @@ use http::header::{ TRANSFER_ENCODING, UPGRADE, }; use http::HeaderMap; -use pin_project_lite::pin_project; +use pin_project::pin_project; use std::error::Error as StdError; use std::io::IoSlice; @@ -94,16 +94,15 @@ fn decode_content_length(headers: &HeaderMap) -> DecodedLength { // body adapters used by both Client and Server -pin_project! { - struct PipeToSendStream - where - S: HttpBody, - { - body_tx: SendStream>, - data_done: bool, - #[pin] - stream: S, - } +#[pin_project] +struct PipeToSendStream +where + S: HttpBody, +{ + body_tx: SendStream>, + data_done: bool, + #[pin] + stream: S, } impl PipeToSendStream diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index 167dd90dbb..eea52e3e4b 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -5,7 +5,7 @@ use std::time::Duration; use h2::server::{Connection, Handshake, SendResponse}; use h2::Reason; -use pin_project_lite::pin_project; +use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::{decode_content_length, ping, PipeToSendStream, SendBuf}; @@ -57,16 +57,15 @@ impl Default for Config { } } -pin_project! 
{ - pub(crate) struct Server - where - S: HttpService, - B: HttpBody, - { - exec: E, - service: S, - state: State, - } +#[pin_project] +pub(crate) struct Server +where + S: HttpService, + B: HttpBody, +{ + exec: E, + service: S, + state: State, } enum State @@ -316,33 +315,24 @@ where } } -pin_project! { - #[allow(missing_debug_implementations)] - pub struct H2Stream - where - B: HttpBody, - { - reply: SendResponse>, - #[pin] - state: H2StreamState, - } +#[allow(missing_debug_implementations)] +#[pin_project] +pub struct H2Stream +where + B: HttpBody, +{ + reply: SendResponse>, + #[pin] + state: H2StreamState, } -pin_project! { - #[project = H2StreamStateProj] - enum H2StreamState - where - B: HttpBody, - { - Service { - #[pin] - fut: F, - }, - Body { - #[pin] - pipe: PipeToSendStream, - }, - } +#[pin_project(project = H2StreamStateProj)] +enum H2StreamState +where + B: HttpBody, +{ + Service(#[pin] F), + Body(#[pin] PipeToSendStream), } impl H2Stream @@ -352,7 +342,7 @@ where fn new(fut: F, respond: SendResponse>) -> H2Stream { H2Stream { reply: respond, - state: H2StreamState::Service { fut }, + state: H2StreamState::Service(fut), } } } @@ -381,7 +371,7 @@ where let mut me = self.project(); loop { let next = match me.state.as_mut().project() { - H2StreamStateProj::Service { fut: h } => { + H2StreamStateProj::Service(h) => { let res = match h.poll(cx) { Poll::Ready(Ok(r)) => r, Poll::Pending => { @@ -419,15 +409,13 @@ where if !body.is_end_stream() { let body_tx = reply!(me, res, false); - H2StreamState::Body { - pipe: PipeToSendStream::new(body, body_tx), - } + H2StreamState::Body(PipeToSendStream::new(body, body_tx)) } else { reply!(me, res, true); return Poll::Ready(Ok(())); } } - H2StreamStateProj::Body { pipe } => { + H2StreamStateProj::Body(pipe) => { return pipe.poll(cx); } }; diff --git a/src/server/accept.rs b/src/server/accept.rs index 4b7a1487dd..4ec287129d 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -9,7 +9,7 @@ #[cfg(feature = "stream")] use futures_core::Stream; #[cfg(feature = "stream")] -use pin_project_lite::pin_project; +use pin_project::pin_project; use crate::common::{ task::{self, Poll}, @@ -86,12 +86,8 @@ pub fn from_stream(stream: S) -> impl Accept where S: Stream>, { - pin_project! { - struct FromStream { - #[pin] - stream: S, - } - } + #[pin_project] + struct FromStream(#[pin] S); impl Accept for FromStream where @@ -103,9 +99,9 @@ where self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll>> { - self.project().stream.poll_next(cx) + self.project().0.poll_next(cx) } } - FromStream { stream } + FromStream(stream) } diff --git a/src/server/conn.rs b/src/server/conn.rs index 112bbe535d..5137708fcb 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -45,6 +45,7 @@ use std::error::Error as StdError; use std::fmt; +#[cfg(feature = "http1")] use std::marker::PhantomData; #[cfg(feature = "tcp")] use std::net::SocketAddr; @@ -52,7 +53,7 @@ use std::net::SocketAddr; use std::time::Duration; use bytes::Bytes; -use pin_project_lite::pin_project; +use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::accept::Accept; @@ -108,85 +109,77 @@ enum ConnectionMode { Fallback, } -pin_project! { - /// A stream mapping incoming IOs to new services. - /// - /// Yields `Connecting`s that are futures that should be put on a reactor. 
- #[must_use = "streams do nothing unless polled"] - #[derive(Debug)] - pub(super) struct Serve { - #[pin] - incoming: I, - make_service: S, - protocol: Http, - } +/// A stream mapping incoming IOs to new services. +/// +/// Yields `Connecting`s that are futures that should be put on a reactor. +#[must_use = "streams do nothing unless polled"] +#[pin_project] +#[derive(Debug)] +pub(super) struct Serve { + #[pin] + incoming: I, + make_service: S, + protocol: Http, } -pin_project! { - /// A future building a new `Service` to a `Connection`. - /// - /// Wraps the future returned from `MakeService` into one that returns - /// a `Connection`. - #[must_use = "futures do nothing unless polled"] - #[derive(Debug)] - pub struct Connecting { - #[pin] - future: F, - io: Option, - protocol: Http, - } +/// A future building a new `Service` to a `Connection`. +/// +/// Wraps the future returned from `MakeService` into one that returns +/// a `Connection`. +#[must_use = "futures do nothing unless polled"] +#[pin_project] +#[derive(Debug)] +pub struct Connecting { + #[pin] + future: F, + io: Option, + protocol: Http, } -pin_project! { - #[must_use = "futures do nothing unless polled"] - #[derive(Debug)] - pub(super) struct SpawnAll { - // TODO: re-add `pub(super)` once rustdoc can handle this. - // - // See https://github.com/rust-lang/rust/issues/64705 - #[pin] - pub(super) serve: Serve, - } +#[must_use = "futures do nothing unless polled"] +#[pin_project] +#[derive(Debug)] +pub(super) struct SpawnAll { + // TODO: re-add `pub(super)` once rustdoc can handle this. + // + // See https://github.com/rust-lang/rust/issues/64705 + #[pin] + pub(super) serve: Serve, } -pin_project! { - /// A future binding a connection with a Service. - /// - /// Polling this future will drive HTTP forward. - #[must_use = "futures do nothing unless polled"] - pub struct Connection - where - S: HttpService, - { - pub(super) conn: Option>, - fallback: Fallback, - } +/// A future binding a connection with a Service. +/// +/// Polling this future will drive HTTP forward. +#[must_use = "futures do nothing unless polled"] +#[pin_project] +pub struct Connection +where + S: HttpService, +{ + pub(super) conn: Option>, + #[cfg(all(feature = "http1", feature = "http2"))] + fallback: Fallback, } -pin_project! 
{ - #[project = ProtoServerProj] - pub(super) enum ProtoServer - where - S: HttpService, - B: HttpBody, - { - #[cfg(feature = "http1")] - H1 { - #[pin] - h1: proto::h1::Dispatcher< - proto::h1::dispatch::Server, - B, - T, - proto::ServerTransaction, - >, - _phantom: PhantomData, - }, - #[cfg(feature = "http2")] - H2 { - #[pin] - h2: proto::h2::Server, S, B, E>, - }, - } +#[pin_project(project = ProtoServerProj)] +pub(super) enum ProtoServer +where + S: HttpService, + B: HttpBody, +{ + #[cfg(feature = "http1")] + H1( + #[pin] + proto::h1::Dispatcher< + proto::h1::dispatch::Server, + B, + T, + proto::ServerTransaction, + >, + PhantomData, + ), + #[cfg(feature = "http2")] + H2(#[pin] proto::h2::Server, S, B, E>), } #[cfg(all(feature = "http1", feature = "http2"))] @@ -196,10 +189,6 @@ enum Fallback { Http1Only, } -#[cfg(not(all(feature = "http1", feature = "http2")))] -#[derive(Clone, Debug)] -struct Fallback(PhantomData); - #[cfg(all(feature = "http1", feature = "http2"))] impl Fallback { fn to_h2(&self) -> bool { @@ -530,10 +519,7 @@ impl Http { conn.set_max_buf_size(max); } let sd = proto::h1::dispatch::Server::new(service); - ProtoServer::H1 { - h1: proto::h1::Dispatcher::new(sd, conn), - _phantom: PhantomData, - } + ProtoServer::H1(proto::h1::Dispatcher::new(sd, conn), PhantomData) }}; } @@ -549,7 +535,7 @@ impl Http { let rewind_io = Rewind::new(io); let h2 = proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone()); - ProtoServer::H2 { h2 } + ProtoServer::H2(h2) } }; @@ -604,14 +590,14 @@ where /// This should only be called while the `Connection` future is still /// pending. If called after `Connection::poll` has resolved, this does /// nothing. - pub fn graceful_shutdown(mut self: Pin<&mut Self>) { - match self.conn { + pub fn graceful_shutdown(self: Pin<&mut Self>) { + match self.project().conn { #[cfg(feature = "http1")] - Some(ProtoServer::H1 { ref mut h1, .. }) => { + Some(ProtoServer::H1(ref mut h1, _)) => { h1.disable_keep_alive(); } #[cfg(feature = "http2")] - Some(ProtoServer::H2 { ref mut h2 }) => { + Some(ProtoServer::H2(ref mut h2)) => { h2.graceful_shutdown(); } None => (), @@ -638,7 +624,7 @@ where pub fn try_into_parts(self) -> Option> { match self.conn.unwrap() { #[cfg(feature = "http1")] - ProtoServer::H1 { h1, .. } => { + ProtoServer::H1(h1, _) => { let (io, read_buf, dispatch) = h1.into_inner(); Some(Parts { io, @@ -648,7 +634,7 @@ where }) } #[cfg(feature = "http2")] - ProtoServer::H2 { .. } => None, + ProtoServer::H2(_h2) => None, } } @@ -672,7 +658,7 @@ where loop { match *self.conn.as_mut().unwrap() { #[cfg(feature = "http1")] - ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) { + ProtoServer::H1(ref mut h1, _) => match ready!(h1.poll_without_shutdown(cx)) { Ok(()) => return Poll::Ready(Ok(())), Err(e) => { #[cfg(feature = "http2")] @@ -688,7 +674,7 @@ where } }, #[cfg(feature = "http2")] - ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()), + ProtoServer::H2(ref mut h2) => return Pin::new(h2).poll(cx).map_ok(|_| ()), }; } } @@ -714,8 +700,8 @@ where let conn = self.conn.take(); let (io, read_buf, dispatch) = match conn.unwrap() { - ProtoServer::H1 { h1, .. } => h1.into_inner(), - ProtoServer::H2 { .. 
} => { + ProtoServer::H1(h1, _) => h1.into_inner(), + ProtoServer::H2(_h2) => { panic!("h2 cannot into_inner"); } }; @@ -728,7 +714,7 @@ where let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone()); debug_assert!(self.conn.is_none()); - self.conn = Some(ProtoServer::H2 { h2 }); + self.conn = Some(ProtoServer::H2(h2)); } /// Enable this connection to support higher-level HTTP upgrades. @@ -962,9 +948,9 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project() { #[cfg(feature = "http1")] - ProtoServerProj::H1 { h1, .. } => h1.poll(cx), + ProtoServerProj::H1(s, _) => s.poll(cx), #[cfg(feature = "http2")] - ProtoServerProj::H2 { h2 } => h2.poll(cx), + ProtoServerProj::H2(s) => s.poll(cx), } } } @@ -978,7 +964,7 @@ pub(crate) mod spawn_all { use crate::common::exec::ConnStreamExec; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::HttpService; - use pin_project_lite::pin_project; + use pin_project::pin_project; // Used by `SpawnAll` to optionally watch a `Connection` future. // @@ -1023,36 +1009,23 @@ pub(crate) mod spawn_all { // Users cannot import this type, nor the associated `NewSvcExec`. Instead, // a blanket implementation for `Executor` is sufficient. - pin_project! { - #[allow(missing_debug_implementations)] - pub struct NewSvcTask, E, W: Watcher> { - #[pin] - state: State, - } + #[pin_project] + #[allow(missing_debug_implementations)] + pub struct NewSvcTask, E, W: Watcher> { + #[pin] + state: State, } - pin_project! { - #[project = StateProj] - pub(super) enum State, E, W: Watcher> { - Connecting { - #[pin] - connecting: Connecting, - watcher: W, - }, - Connected { - #[pin] - future: W::Future, - }, - } + #[pin_project(project = StateProj)] + pub(super) enum State, E, W: Watcher> { + Connecting(#[pin] Connecting, W), + Connected(#[pin] W::Future), } impl, E, W: Watcher> NewSvcTask { pub(super) fn new(connecting: Connecting, watcher: W) -> Self { NewSvcTask { - state: State::Connecting { - connecting, - watcher, - }, + state: State::Connecting(connecting, watcher), } } } @@ -1079,10 +1052,7 @@ pub(crate) mod spawn_all { loop { let next = { match me.state.as_mut().project() { - StateProj::Connecting { - connecting, - watcher, - } => { + StateProj::Connecting(connecting, watcher) => { let res = ready!(connecting.poll(cx)); let conn = match res { Ok(conn) => conn, @@ -1092,10 +1062,10 @@ pub(crate) mod spawn_all { return Poll::Ready(()); } }; - let future = watcher.watch(conn.with_upgrades()); - State::Connected { future } + let connected = watcher.watch(conn.with_upgrades()); + State::Connected(connected) } - StateProj::Connected { future } => { + StateProj::Connected(future) => { return future.poll(cx).map(|res| { if let Err(err) = res { debug!("connection error: {}", err); @@ -1163,7 +1133,7 @@ mod upgrades { #[cfg(feature = "http1")] Ok(proto::Dispatched::Upgrade(pending)) => { match self.inner.conn.take() { - Some(ProtoServer::H1 { h1, .. 
}) => { + Some(ProtoServer::H1(h1, _)) => { let (io, buf, _) = h1.into_inner(); pending.fulfill(Upgraded::new(io, buf)); return Poll::Ready(Ok(())); diff --git a/src/server/server.rs b/src/server/server.rs index 07d9e5fbb0..48cc6e2803 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -6,7 +6,7 @@ use std::net::{SocketAddr, TcpListener as StdTcpListener}; #[cfg(feature = "tcp")] use std::time::Duration; -use pin_project_lite::pin_project; +use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::accept::Accept; @@ -21,17 +21,16 @@ use super::shutdown::{Graceful, GracefulWatcher}; #[cfg(feature = "tcp")] use super::tcp::AddrIncoming; -pin_project! { - /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. - /// - /// `Server` is a `Future` mapping a bound listener with a set of service - /// handlers. It is built using the [`Builder`](Builder), and the future - /// completes when the server has been shutdown. It should be run by an - /// `Executor`. - pub struct Server { - #[pin] - spawn_all: SpawnAll, - } +/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. +/// +/// `Server` is a `Future` mapping a bound listener with a set of service +/// handlers. It is built using the [`Builder`](Builder), and the future +/// completes when the server has been shutdown. It should be run by an +/// `Executor`. +#[pin_project] +pub struct Server { + #[pin] + spawn_all: SpawnAll, } /// A builder for a [`Server`](Server). diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs index 122853ac17..e54ba42104 100644 --- a/src/server/shutdown.rs +++ b/src/server/shutdown.rs @@ -1,36 +1,33 @@ use std::error::Error as StdError; -use pin_project_lite::pin_project; +use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; -use super::accept::Accept; use super::conn::{SpawnAll, UpgradeableConnection, Watcher}; +use super::accept::Accept; use crate::body::{Body, HttpBody}; use crate::common::drain::{self, Draining, Signal, Watch, Watching}; use crate::common::exec::{ConnStreamExec, NewSvcExec}; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::{HttpService, MakeServiceRef}; -pin_project! { - #[allow(missing_debug_implementations)] - pub struct Graceful { - #[pin] - state: State, - } +#[allow(missing_debug_implementations)] +#[pin_project] +pub struct Graceful { + #[pin] + state: State, } -pin_project! 
{ - #[project = StateProj] - pub(super) enum State { - Running { - drain: Option<(Signal, Watch)>, - #[pin] - spawn_all: SpawnAll, - #[pin] - signal: F, - }, - Draining { draining: Draining }, - } +#[pin_project(project = StateProj)] +pub(super) enum State { + Running { + drain: Option<(Signal, Watch)>, + #[pin] + spawn_all: SpawnAll, + #[pin] + signal: F, + }, + Draining(Draining), } impl Graceful { @@ -74,16 +71,14 @@ where Poll::Ready(()) => { debug!("signal received, starting graceful shutdown"); let sig = drain.take().expect("drain channel").0; - State::Draining { - draining: sig.drain(), - } + State::Draining(sig.drain()) } Poll::Pending => { let watch = drain.as_ref().expect("drain channel").1.clone(); return spawn_all.poll_watch(cx, &GracefulWatcher(watch)); } }, - StateProj::Draining { ref mut draining } => { + StateProj::Draining(ref mut draining) => { return Pin::new(draining).poll(cx).map(Ok); } } diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 46c570decd..91afc40120 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -229,14 +229,13 @@ mod addr_stream { use crate::common::{task, Pin, Poll}; - pin_project_lite::pin_project! { - /// A transport returned yieled by `AddrIncoming`. - #[derive(Debug)] - pub struct AddrStream { - #[pin] - inner: TcpStream, - pub(super) remote_addr: SocketAddr, - } + /// A transport returned yieled by `AddrIncoming`. + #[pin_project::pin_project] + #[derive(Debug)] + pub struct AddrStream { + #[pin] + inner: TcpStream, + pub(super) remote_addr: SocketAddr, } impl AddrStream { diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs index 2697af8f4c..766d0c4689 100644 --- a/src/service/oneshot.rs +++ b/src/service/oneshot.rs @@ -1,6 +1,6 @@ // TODO: Eventually to be replaced with tower_util::Oneshot. -use pin_project_lite::pin_project; +use pin_project::pin_project; use tower_service::Service; use crate::common::{task, Future, Pin, Poll}; @@ -10,35 +10,25 @@ where S: Service, { Oneshot { - state: State::NotReady { svc, req }, + state: State::NotReady(svc, req), } } -pin_project! { - // A `Future` consuming a `Service` and request, waiting until the `Service` - // is ready, and then calling `Service::call` with the request, and - // waiting for that `Future`. - #[allow(missing_debug_implementations)] - pub struct Oneshot, Req> { - #[pin] - state: State, - } +// A `Future` consuming a `Service` and request, waiting until the `Service` +// is ready, and then calling `Service::call` with the request, and +// waiting for that `Future`. +#[allow(missing_debug_implementations)] +#[pin_project] +pub struct Oneshot, Req> { + #[pin] + state: State, } -pin_project! { - #[project = StateProj] - #[project_replace = StateProjOwn] - enum State, Req> { - NotReady { - svc: S, - req: Req, - }, - Called { - #[pin] - fut: S::Future, - }, - Tmp, - } +#[pin_project(project = StateProj, project_replace = StateProjOwn)] +enum State, Req> { + NotReady(S, Req), + Called(#[pin] S::Future), + Tmp, } impl Future for Oneshot @@ -52,19 +42,19 @@ where loop { match me.state.as_mut().project() { - StateProj::NotReady { ref mut svc, .. 
} => { + StateProj::NotReady(ref mut svc, _) => { ready!(svc.poll_ready(cx))?; // fallthrough out of the match's borrow } - StateProj::Called { fut } => { + StateProj::Called(fut) => { return fut.poll(cx); } StateProj::Tmp => unreachable!(), } match me.state.as_mut().project_replace(State::Tmp) { - StateProjOwn::NotReady { mut svc, req } => { - me.state.set(State::Called { fut: svc.call(req) }); + StateProjOwn::NotReady(mut svc, req) => { + me.state.set(State::Called(svc.call(req))); } _ => unreachable!(), } From 4445e736ba45d4c91d2a4b2ad2aaac02ea7cf710 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 5 Feb 2021 15:38:48 -0800 Subject: [PATCH 028/420] refactor(client): fix unused Pin import without http2 --- src/client/dispatch.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs index 17cfbf4f8c..b42a65352f 100644 --- a/src/client/dispatch.rs +++ b/src/client/dispatch.rs @@ -4,7 +4,9 @@ use std::future::Future; use futures_util::FutureExt; use tokio::sync::{mpsc, oneshot}; -use crate::common::{task, Pin, Poll}; +use crate::common::{task, Poll}; +#[cfg(feature = "http2")] +use crate::common::Pin; pub(crate) type RetryPromise = oneshot::Receiver)>>; pub(crate) type Promise = oneshot::Receiver>; From 196d9bd69c0059dbff6771f6dce9188d4841c185 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 5 Feb 2021 15:54:38 -0800 Subject: [PATCH 029/420] v0.14.4 --- CHANGELOG.md | 8 ++++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bcd5154f96..ad0f31e01e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +### v0.14.4 (2021-02-05) + + +#### Bug Fixes + +* **build**: Fix compile error when only `http1` feature was enabled. + + ### v0.14.3 (2021-02-05) diff --git a/Cargo.toml b/Cargo.toml index 9617ff7c23..f63170bee4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.3" # don't forget to update html_root_url +version = "0.14.4" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" diff --git a/src/lib.rs b/src/lib.rs index 7029c517a3..d5fb494327 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,4 @@ -#![doc(html_root_url = "https://docs.rs/hyper/0.14.3")] +#![doc(html_root_url = "https://docs.rs/hyper/0.14.4")] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] From 95ce96048cd71fcfa63d75b1caa75ea000292487 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 7 Feb 2021 00:20:01 +0900 Subject: [PATCH 030/420] test(http1): fix non_fmt_panic warning (#2424) --- src/proto/h1/decode.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs index 4d2707783a..dad015e092 100644 --- a/src/proto/h1/decode.rs +++ b/src/proto/h1/decode.rs @@ -490,7 +490,7 @@ mod tests { } }; if state == ChunkedState::Body || state == ChunkedState::End { - panic!(format!("Was Ok. Expected Err for {:?}", s)); + panic!("Was Ok. 
Expected Err for {:?}", s); } } } From dfa1bb291d71978820e8077be94c1d5837935bbc Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 6 Feb 2021 12:03:00 +0900 Subject: [PATCH 031/420] chore(ci): use --feature-powerset --depth 2 in features check --- .github/workflows/CI.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index ca864d38a6..d4926afe58 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -111,8 +111,8 @@ jobs: - name: Install cargo-hack run: cargo install cargo-hack - - name: check --each-feature - run: cargo hack check --each-feature --skip ffi -Z avoid-dev-deps + - name: check --feature-powerset + run: cargo hack check --feature-powerset --depth 2 --skip ffi -Z avoid-dev-deps ffi: name: Test C API (FFI) From 42587059e6175735b1a8656c5ddbff0edb19294c Mon Sep 17 00:00:00 2001 From: Arnavion Date: Mon, 8 Feb 2021 20:00:03 -0800 Subject: [PATCH 032/420] docs(server): use cfg_feature! on hyper::server::conn::tcp instead of #[cfg] This is required to surface the required feature (`tcp`) in the generated docs for `hyper::server::conn::{AddrIncoming, AddrStream}`. Before this change, their docs only mentioned the features needed for the `hyper::server::conn` mod itself. Fixes #2425 --- src/server/mod.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/server/mod.rs b/src/server/mod.rs index fac33e06d6..a66cc0fcc2 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -61,6 +61,10 @@ cfg_feature! { pub mod conn; mod server; mod shutdown; - #[cfg(feature = "tcp")] - mod tcp; + + cfg_feature! { + #![feature = "tcp"] + + mod tcp; + } } From f01de8e503c649034a53a8fdf3ad2e107ad9ad2d Mon Sep 17 00:00:00 2001 From: Markus Westerlind Date: Thu, 18 Feb 2021 19:35:43 +0100 Subject: [PATCH 033/420] refactor(client): Use async/await more (#2437) * refactor: Use async/await in client.rs * refactor: Simplify client.rs a bit more * refactor: Allow !Unpin in Lazy * Remove some impl Future * Remove some combinator use --- src/client/client.rs | 307 ++++++++++++++++++++--------------------- src/client/conn.rs | 3 +- src/client/dispatch.rs | 7 +- src/common/lazy.rs | 35 ++--- 4 files changed, 174 insertions(+), 178 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index d213b99c8c..fd61699f84 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -162,7 +162,7 @@ where Version::HTTP_10 => { if is_http_connect { warn!("CONNECT is not allowed for HTTP/1.0"); - return ResponseFuture::new(Box::new(future::err( + return ResponseFuture::new(Box::pin(future::err( crate::Error::new_user_unsupported_request_method(), ))); } @@ -179,35 +179,33 @@ where let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { Ok(s) => s, Err(err) => { - return ResponseFuture::new(Box::new(future::err(err))); + return ResponseFuture::new(Box::pin(future::err(err))); } }; - ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key))) + ResponseFuture::new(Box::pin(self.clone().retryably_send_request(req, pool_key))) } - fn retryably_send_request( - &self, - req: Request, + async fn retryably_send_request( + self, + mut req: Request, pool_key: PoolKey, - ) -> impl Future>> { - let client = self.clone(); + ) -> crate::Result> { let uri = req.uri().clone(); - let mut send_fut = client.send_request(req, pool_key.clone()); - future::poll_fn(move |cx| loop { - match ready!(Pin::new(&mut send_fut).poll(cx)) { - Ok(resp) => return Poll::Ready(Ok(resp)), - 
Err(ClientError::Normal(err)) => return Poll::Ready(Err(err)), + loop { + req = match self.send_request(req, pool_key.clone()).await { + Ok(resp) => return Ok(resp), + Err(ClientError::Normal(err)) => return Err(err), Err(ClientError::Canceled { connection_reused, mut req, reason, }) => { - if !client.config.retry_canceled_requests || !connection_reused { + if !self.config.retry_canceled_requests || !connection_reused { // if client disabled, don't retry // a fresh connection means we definitely can't retry - return Poll::Ready(Err(reason)); + return Err(reason); } trace!( @@ -215,115 +213,112 @@ where reason ); *req.uri_mut() = uri.clone(); - send_fut = client.send_request(req, pool_key.clone()); + req } } - }) + } } - fn send_request( + async fn send_request( &self, mut req: Request, pool_key: PoolKey, - ) -> impl Future, ClientError>> + Unpin { - let conn = self.connection_for(pool_key); + ) -> Result, ClientError> { + let mut pooled = self.connection_for(pool_key).await?; + + if pooled.is_http1() { + if self.config.set_host { + let uri = req.uri().clone(); + req.headers_mut().entry(HOST).or_insert_with(|| { + let hostname = uri.host().expect("authority implies host"); + if let Some(port) = uri.port() { + let s = format!("{}:{}", hostname, port); + HeaderValue::from_str(&s) + } else { + HeaderValue::from_str(hostname) + } + .expect("uri host is valid header value") + }); + } - let set_host = self.config.set_host; - let executor = self.conn_builder.exec.clone(); - conn.and_then(move |mut pooled| { - if pooled.is_http1() { - if set_host { - let uri = req.uri().clone(); - req.headers_mut().entry(HOST).or_insert_with(|| { - let hostname = uri.host().expect("authority implies host"); - if let Some(port) = uri.port() { - let s = format!("{}:{}", hostname, port); - HeaderValue::from_str(&s) - } else { - HeaderValue::from_str(hostname) - } - .expect("uri host is valid header value") - }); - } + // CONNECT always sends authority-form, so check it first... + if req.method() == Method::CONNECT { + authority_form(req.uri_mut()); + } else if pooled.conn_info.is_proxied { + absolute_form(req.uri_mut()); + } else { + origin_form(req.uri_mut()); + }; + } else if req.method() == Method::CONNECT { + debug!("client does not support CONNECT requests over HTTP2"); + return Err(ClientError::Normal( + crate::Error::new_user_unsupported_request_method(), + )); + } - // CONNECT always sends authority-form, so check it first... - if req.method() == Method::CONNECT { - authority_form(req.uri_mut()); - } else if pooled.conn_info.is_proxied { - absolute_form(req.uri_mut()); - } else { - origin_form(req.uri_mut()); - }; - } else if req.method() == Method::CONNECT { - debug!("client does not support CONNECT requests over HTTP2"); - return Either::Left(future::err(ClientError::Normal( - crate::Error::new_user_unsupported_request_method(), - ))); + let fut = pooled + .send_request_retryable(req) + .map_err(ClientError::map_with_reused(pooled.is_reused())); + + // If the Connector included 'extra' info, add to Response... + let extra_info = pooled.conn_info.extra.clone(); + let fut = fut.map_ok(move |mut res| { + if let Some(extra) = extra_info { + extra.set(res.extensions_mut()); } + res + }); - let fut = pooled - .send_request_retryable(req) - .map_err(ClientError::map_with_reused(pooled.is_reused())); + // As of futures@0.1.21, there is a race condition in the mpsc + // channel, such that sending when the receiver is closing can + // result in the message being stuck inside the queue. 
It won't + // ever notify until the Sender side is dropped. + // + // To counteract this, we must check if our senders 'want' channel + // has been closed after having tried to send. If so, error out... + if pooled.is_closed() { + return fut.await; + } - // If the Connector included 'extra' info, add to Response... - let extra_info = pooled.conn_info.extra.clone(); - let fut = fut.map_ok(move |mut res| { - if let Some(extra) = extra_info { - extra.set(res.extensions_mut()); - } - res + let mut res = fut.await?; + + // If pooled is HTTP/2, we can toss this reference immediately. + // + // when pooled is dropped, it will try to insert back into the + // pool. To delay that, spawn a future that completes once the + // sender is ready again. + // + // This *should* only be once the related `Connection` has polled + // for a new request to start. + // + // It won't be ready if there is a body to stream. + if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { + drop(pooled); + } else if !res.body().is_end_stream() { + let (delayed_tx, delayed_rx) = oneshot::channel(); + res.body_mut().delayed_eof(delayed_rx); + let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { + // At this point, `pooled` is dropped, and had a chance + // to insert into the pool (if conn was idle) + drop(delayed_tx); }); - // As of futures@0.1.21, there is a race condition in the mpsc - // channel, such that sending when the receiver is closing can - // result in the message being stuck inside the queue. It won't - // ever notify until the Sender side is dropped. - // - // To counteract this, we must check if our senders 'want' channel - // has been closed after having tried to send. If so, error out... - if pooled.is_closed() { - return Either::Right(Either::Left(fut)); - } + self.conn_builder.exec.execute(on_idle); + } else { + // There's no body to delay, but the connection isn't + // ready yet. Only re-insert when it's ready + let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); - Either::Right(Either::Right(fut.map_ok(move |mut res| { - // If pooled is HTTP/2, we can toss this reference immediately. - // - // when pooled is dropped, it will try to insert back into the - // pool. To delay that, spawn a future that completes once the - // sender is ready again. - // - // This *should* only be once the related `Connection` has polled - // for a new request to start. - // - // It won't be ready if there is a body to stream. - if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { - drop(pooled); - } else if !res.body().is_end_stream() { - let (delayed_tx, delayed_rx) = oneshot::channel(); - res.body_mut().delayed_eof(delayed_rx); - let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { - // At this point, `pooled` is dropped, and had a chance - // to insert into the pool (if conn was idle) - drop(delayed_tx); - }); - - executor.execute(on_idle); - } else { - // There's no body to delay, but the connection isn't - // ready yet. Only re-insert when it's ready - let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); + self.conn_builder.exec.execute(on_idle); + } - executor.execute(on_idle); - } - res - }))) - }) + Ok(res) } - fn connection_for( + async fn connection_for( &self, pool_key: PoolKey, - ) -> impl Future>, ClientError>> { + ) -> Result>, ClientError> { // This actually races 2 different futures to try to get a ready // connection the fastest, and to reduce connection churn. 
// @@ -340,9 +335,9 @@ where let checkout = self.pool.checkout(pool_key.clone()); let connect = self.connect_to(pool_key); - let executor = self.conn_builder.exec.clone(); // The order of the `select` is depended on below... - future::select(checkout, connect).then(move |either| match either { + + match future::select(checkout, connect).await { // Checkout won, connect future may have been started or not. // // If it has, let it finish and insert back into the pool, @@ -366,12 +361,12 @@ where }); // An execute error here isn't important, we're just trying // to prevent a waste of a socket... - executor.execute(bg); + self.conn_builder.exec.execute(bg); } - Either::Left(future::ok(checked_out)) + Ok(checked_out) } // Connect won, checkout can just be dropped. - Either::Right((Ok(connected), _checkout)) => Either::Left(future::ok(connected)), + Either::Right((Ok(connected), _checkout)) => Ok(connected), // Either checkout or connect could get canceled: // // 1. Connect is canceled if this is HTTP/2 and there is @@ -380,21 +375,21 @@ where // idle connection reliably. // // In both cases, we should just wait for the other future. - Either::Left((Err(err), connecting)) => Either::Right(Either::Left({ + Either::Left((Err(err), connecting)) => { if err.is_canceled() { - Either::Left(connecting.map_err(ClientError::Normal)) + connecting.await.map_err(ClientError::Normal) } else { - Either::Right(future::err(ClientError::Normal(err))) + Err(ClientError::Normal(err)) } - })), - Either::Right((Err(err), checkout)) => Either::Right(Either::Right({ + } + Either::Right((Err(err), checkout)) => { if err.is_canceled() { - Either::Left(checkout.map_err(ClientError::Normal)) + checkout.await.map_err(ClientError::Normal) } else { - Either::Right(future::err(ClientError::Normal(err))) + Err(ClientError::Normal(err)) } - })), - }) + } + } } fn connect_to( @@ -459,44 +454,40 @@ where conn_builder.http2_only(is_h2); } - Either::Left(Box::pin( - conn_builder - .handshake(io) - .and_then(move |(tx, conn)| { - trace!( - "handshake complete, spawning background dispatcher task" - ); - executor.execute( - conn.map_err(|e| debug!("client connection error: {}", e)) - .map(|_| ()), - ); - - // Wait for 'conn' to ready up before we - // declare this tx as usable - tx.when_ready() - }) - .map_ok(move |tx| { - let tx = { - #[cfg(feature = "http2")] - { - if is_h2 { - PoolTx::Http2(tx.into_http2()) - } else { - PoolTx::Http1(tx) - } - } - #[cfg(not(feature = "http2"))] + Either::Left(Box::pin(async move { + let (tx, conn) = conn_builder.handshake(io).await?; + + trace!("handshake complete, spawning background dispatcher task"); + executor.execute( + conn.map_err(|e| debug!("client connection error: {}", e)) + .map(|_| ()), + ); + + // Wait for 'conn' to ready up before we + // declare this tx as usable + let tx = tx.when_ready().await?; + + let tx = { + #[cfg(feature = "http2")] + { + if is_h2 { + PoolTx::Http2(tx.into_http2()) + } else { PoolTx::Http1(tx) - }; - pool.pooled( - connecting, - PoolClient { - conn_info: connected, - tx, - }, - ) - }), - )) + } + } + #[cfg(not(feature = "http2"))] + PoolTx::Http1(tx) + }; + + Ok(pool.pooled( + connecting, + PoolClient { + conn_info: connected, + tx, + }, + )) + })) }), ) }) @@ -563,13 +554,13 @@ impl fmt::Debug for Client { // ===== impl ResponseFuture ===== impl ResponseFuture { - fn new(fut: Box>> + Send>) -> Self { - Self { inner: fut.into() } + fn new(fut: Pin>> + Send>>) -> Self { + Self { inner: fut } } fn error_version(ver: Version) -> Self { warn!("Request has 
unsupported version \"{:?}\"", ver); - ResponseFuture::new(Box::new(future::err( + ResponseFuture::new(Box::pin(future::err( crate::Error::new_user_unsupported_version(), ))) } diff --git a/src/client/conn.rs b/src/client/conn.rs index 2799c61eff..2da083db16 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -192,12 +192,13 @@ impl SendRequest { self.dispatch.poll_ready(cx) } - pub(super) fn when_ready(self) -> impl Future> { + pub(super) async fn when_ready(self) -> crate::Result { let mut me = Some(self); future::poll_fn(move |cx| { ready!(me.as_mut().unwrap().poll_ready(cx))?; Poll::Ready(Ok(me.take().unwrap())) }) + .await } pub(super) fn is_ready(&self) -> bool { diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs index b42a65352f..484cb04f4b 100644 --- a/src/client/dispatch.rs +++ b/src/client/dispatch.rs @@ -4,9 +4,9 @@ use std::future::Future; use futures_util::FutureExt; use tokio::sync::{mpsc, oneshot}; -use crate::common::{task, Poll}; #[cfg(feature = "http2")] use crate::common::Pin; +use crate::common::{task, Poll}; pub(crate) type RetryPromise = oneshot::Receiver)>>; pub(crate) type Promise = oneshot::Receiver>; @@ -230,10 +230,10 @@ impl Callback { } #[cfg(feature = "http2")] - pub(crate) fn send_when( + pub(crate) async fn send_when( self, mut when: impl Future)>> + Unpin, - ) -> impl Future { + ) { use futures_util::future; let mut cb = Some(self); @@ -257,6 +257,7 @@ impl Callback { } } }) + .await } } diff --git a/src/common/lazy.rs b/src/common/lazy.rs index 4d2e322c2c..6bf87c4355 100644 --- a/src/common/lazy.rs +++ b/src/common/lazy.rs @@ -1,4 +1,4 @@ -use std::mem; +use pin_project::pin_project; use super::{task, Future, Pin, Poll}; @@ -18,20 +18,23 @@ where // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] +#[pin_project] pub(crate) struct Lazy { + #[pin] inner: Inner, } +#[pin_project(project = InnerProj, project_replace = InnerProjReplace)] enum Inner { Init(F), - Fut(R), + Fut(#[pin] R), Empty, } impl Started for Lazy where F: FnOnce() -> R, - R: Future + Unpin, + R: Future, { fn started(&self) -> bool { match self.inner { @@ -44,26 +47,26 @@ where impl Future for Lazy where F: FnOnce() -> R, - R: Future + Unpin, + R: Future, { type Output = R::Output; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - if let Inner::Fut(ref mut f) = self.inner { - return Pin::new(f).poll(cx); + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + let mut this = self.project(); + + if let InnerProj::Fut(f) = this.inner.as_mut().project() { + return f.poll(cx); } - match mem::replace(&mut self.inner, Inner::Empty) { - Inner::Init(func) => { - let mut fut = func(); - let ret = Pin::new(&mut fut).poll(cx); - self.inner = Inner::Fut(fut); - ret + match this.inner.as_mut().project_replace(Inner::Empty) { + InnerProjReplace::Init(func) => { + this.inner.set(Inner::Fut(func())); + if let InnerProj::Fut(f) = this.inner.project() { + return f.poll(cx); + } + unreachable!() } _ => unreachable!("lazy state wrong"), } } } - -// The closure `F` is never pinned -impl Unpin for Lazy {} From 6efc1a1cce0abdd4e3263e7ea328ad49616d8050 Mon Sep 17 00:00:00 2001 From: David Pedersen Date: Fri, 19 Feb 2021 22:50:43 +0100 Subject: [PATCH 034/420] docs(server): add server example using `tower::make::Shared` (#2440) `tower` 0.4.5 introduced `Shared` which is a `MakeService` that produces services by cloning an inner service. 
This works quite well with `hyper` if your service doesn't need the incoming connection and implements `Clone`. However that might not be entirely obvious so I thought it made sense to add an example to the docs. I wasn't quite sure if the example should go in the server or service module docs but since there already is an example using `make_service_fn` in the server docs I opted to add it there. Let me know if you'd rather have it somewhere else. --- Cargo.toml | 1 + src/server/mod.rs | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index f63170bee4..6392b207cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,6 +66,7 @@ tokio = { version = "1", features = [ ] } tokio-test = "0.4" tokio-util = { version = "0.6", features = ["codec"] } +tower = { version = "0.4", features = ["make"] } tower-util = "0.3" url = "2.2" diff --git a/src/server/mod.rs b/src/server/mod.rs index a66cc0fcc2..7647449adf 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -50,6 +50,41 @@ //! # #[cfg(not(feature = "runtime"))] //! # fn main() {} //! ``` +//! +//! If you don't need the connection and your service implements `Clone` you can use +//! [`tower::make::Shared`] instead of `make_service_fn` which is a bit simpler: +//! +//! ```no_run +//! # use std::convert::Infallible; +//! # use std::net::SocketAddr; +//! # use hyper::{Body, Request, Response, Server}; +//! # use hyper::service::{make_service_fn, service_fn}; +//! # use tower::make::Shared; +//! # async fn handle(_req: Request) -> Result, Infallible> { +//! # Ok(Response::new(Body::from("Hello World"))) +//! # } +//! # #[cfg(feature = "runtime")] +//! #[tokio::main] +//! async fn main() { +//! // Construct our SocketAddr to listen on... +//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); +//! +//! // Shared is a MakeService that produces services by cloning an inner service... +//! let make_service = Shared::new(service_fn(handle)); +//! +//! // Then bind and serve... +//! let server = Server::bind(&addr).serve(make_service); +//! +//! // And run forever... +//! if let Err(e) = server.await { +//! eprintln!("server error: {}", e); +//! } +//! } +//! # #[cfg(not(feature = "runtime"))] +//! # fn main() {} +//! ``` +//! +//! 
[`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html pub mod accept; From 0b11eee9bde421cdc1680cadabfd38c5aff8e62f Mon Sep 17 00:00:00 2001 From: CfirTsabari Date: Mon, 22 Feb 2021 21:33:28 +0200 Subject: [PATCH 035/420] fix(client): omit default port from automatic Host headers (#2441) Fixes hyperium/hyper#2407 --- src/client/client.rs | 62 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 2 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index fd61699f84..10cbc37b63 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -6,7 +6,7 @@ use std::time::Duration; use futures_channel::oneshot; use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; use http::header::{HeaderValue, HOST}; -use http::uri::Scheme; +use http::uri::{Port, Scheme}; use http::{Method, Request, Response, Uri, Version}; use super::conn; @@ -231,7 +231,7 @@ where let uri = req.uri().clone(); req.headers_mut().entry(HOST).or_insert_with(|| { let hostname = uri.host().expect("authority implies host"); - if let Some(port) = uri.port() { + if let Some(port) = get_non_default_port(&uri) { let s = format!("{}:{}", hostname, port); HeaderValue::from_str(&s) } else { @@ -820,6 +820,20 @@ fn set_scheme(uri: &mut Uri, scheme: Scheme) { *uri = Uri::from_parts(parts).expect("scheme is valid"); } +fn get_non_default_port(uri: &Uri) -> Option> { + match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { + (Some(443), true) => None, + (Some(80), false) => None, + _ => uri.port(), + } +} + +fn is_schema_secure(uri: &Uri) -> bool { + uri.scheme_str() + .map(|scheme_str| matches!(scheme_str, "wss" | "https")) + .unwrap_or_default() +} + /// A builder to configure a new [`Client`](Client). 
/// /// # Example @@ -1221,4 +1235,48 @@ mod unit_tests { assert_eq!(scheme, *"http"); assert_eq!(host, "hyper.rs"); } + + #[test] + fn test_is_secure() { + assert_eq!( + is_schema_secure(&"http://hyper.rs".parse::().unwrap()), + false + ); + assert_eq!(is_schema_secure(&"hyper.rs".parse::().unwrap()), false); + assert_eq!( + is_schema_secure(&"wss://hyper.rs".parse::().unwrap()), + true + ); + assert_eq!( + is_schema_secure(&"ws://hyper.rs".parse::().unwrap()), + false + ); + } + + #[test] + fn test_get_non_default_port() { + assert!(get_non_default_port(&"http://hyper.rs".parse::().unwrap()).is_none()); + assert!(get_non_default_port(&"http://hyper.rs:80".parse::().unwrap()).is_none()); + assert!(get_non_default_port(&"https://hyper.rs:443".parse::().unwrap()).is_none()); + assert!(get_non_default_port(&"hyper.rs:80".parse::().unwrap()).is_none()); + + assert_eq!( + get_non_default_port(&"http://hyper.rs:123".parse::().unwrap()) + .unwrap() + .as_u16(), + 123 + ); + assert_eq!( + get_non_default_port(&"https://hyper.rs:80".parse::().unwrap()) + .unwrap() + .as_u16(), + 80 + ); + assert_eq!( + get_non_default_port(&"hyper.rs:123".parse::().unwrap()) + .unwrap() + .as_u16(), + 123 + ); + } } From a60280873bbfe5c2a2806c88891bf91e3e4c3882 Mon Sep 17 00:00:00 2001 From: CfirTsabari Date: Tue, 23 Feb 2021 02:12:06 +0200 Subject: [PATCH 036/420] refactor(ffi): Removed need for cbindgen type renames (#2442) Fixes hyperium/hyper#2428 --- capi/cbindgen.toml | 5 ----- src/ffi/body.rs | 10 ++++----- src/ffi/client.rs | 16 ++++++------- src/ffi/io.rs | 22 +++++++++--------- src/ffi/task.rs | 56 +++++++++++++++++++++++----------------------- 5 files changed, 52 insertions(+), 57 deletions(-) diff --git a/capi/cbindgen.toml b/capi/cbindgen.toml index fd611e18c4..aef9c6237b 100644 --- a/capi/cbindgen.toml +++ b/capi/cbindgen.toml @@ -7,8 +7,3 @@ documentation_style = "c" [parse.expand] crates = ["hyper-capi"] - -[export.rename] -"Exec" = "hyper_executor" -"Io" = "hyper_io" -"Task" = "hyper_task" diff --git a/src/ffi/body.rs b/src/ffi/body.rs index 14013fc3d0..e49d2dacc7 100644 --- a/src/ffi/body.rs +++ b/src/ffi/body.rs @@ -6,7 +6,7 @@ use std::task::{Context, Poll}; use http::HeaderMap; use libc::{c_int, size_t}; -use super::task::{hyper_context, hyper_task_return_type, AsTaskType, Task}; +use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType}; use super::{UserDataPointer, HYPER_ITER_CONTINUE}; use crate::body::{Body, Bytes, HttpBody as _}; @@ -57,11 +57,11 @@ ffi_fn! { /// /// This does not consume the `hyper_body *`, so it may be used to again. /// However, it MUST NOT be used or freed until the related task completes. - fn hyper_body_data(body: *mut hyper_body) -> *mut Task { + fn hyper_body_data(body: *mut hyper_body) -> *mut hyper_task { // This doesn't take ownership of the Body, so don't allow destructor let mut body = ManuallyDrop::new(unsafe { Box::from_raw(body) }); - Box::into_raw(Task::boxed(async move { + Box::into_raw(hyper_task::boxed(async move { body.0.data().await.map(|res| res.map(hyper_buf)) })) } @@ -78,7 +78,7 @@ ffi_fn! { /// chunks as they are received, or `HYPER_ITER_BREAK` to cancel. /// /// This will consume the `hyper_body *`, you shouldn't use it anymore or free it. 
- fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut Task { + fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut hyper_task { if body.is_null() { return ptr::null_mut(); } @@ -86,7 +86,7 @@ ffi_fn! { let mut body = unsafe { Box::from_raw(body) }; let userdata = UserDataPointer(userdata); - Box::into_raw(Task::boxed(async move { + Box::into_raw(hyper_task::boxed(async move { while let Some(item) = body.0.data().await { let chunk = item?; if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) { diff --git a/src/ffi/client.rs b/src/ffi/client.rs index def4644141..05d49015a0 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -7,8 +7,8 @@ use crate::rt::Executor as _; use super::error::hyper_code; use super::http_types::{hyper_request, hyper_response}; -use super::io::Io; -use super::task::{hyper_task_return_type, AsTaskType, Exec, Task, WeakExec}; +use super::io::hyper_io; +use super::task::{hyper_executor, hyper_task, hyper_task_return_type, AsTaskType, WeakExec}; pub struct hyper_clientconn_options { builder: conn::Builder, @@ -30,7 +30,7 @@ ffi_fn! { /// /// The returned `hyper_task *` must be polled with an executor until the /// handshake completes, at which point the value can be taken. - fn hyper_clientconn_handshake(io: *mut Io, options: *mut hyper_clientconn_options) -> *mut Task { + fn hyper_clientconn_handshake(io: *mut hyper_io, options: *mut hyper_clientconn_options) -> *mut hyper_task { if io.is_null() { return std::ptr::null_mut(); } @@ -41,7 +41,7 @@ ffi_fn! { let options = unsafe { Box::from_raw(options) }; let io = unsafe { Box::from_raw(io) }; - Box::into_raw(Task::boxed(async move { + Box::into_raw(hyper_task::boxed(async move { options.builder.handshake::<_, crate::Body>(io) .await .map(|(tx, conn)| { @@ -59,7 +59,7 @@ ffi_fn! { /// /// Returns a task that needs to be polled until it is ready. When ready, the /// task yields a `hyper_response *`. - fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut Task { + fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut hyper_task { if conn.is_null() { return std::ptr::null_mut(); } @@ -78,7 +78,7 @@ ffi_fn! { fut.await.map(hyper_response::wrap) }; - Box::into_raw(Task::boxed(fut)) + Box::into_raw(hyper_task::boxed(fut)) } } @@ -118,11 +118,11 @@ ffi_fn! { /// Set the client background task executor. /// /// This does not consume the `options` or the `exec`. - fn hyper_clientconn_options_exec(opts: *mut hyper_clientconn_options, exec: *const Exec) { + fn hyper_clientconn_options_exec(opts: *mut hyper_clientconn_options, exec: *const hyper_executor) { let opts = unsafe { &mut *opts }; let exec = unsafe { Arc::from_raw(exec) }; - let weak_exec = Exec::downgrade(&exec); + let weak_exec = hyper_executor::downgrade(&exec); std::mem::forget(exec); opts.builder.executor(weak_exec.clone()); diff --git a/src/ffi/io.rs b/src/ffi/io.rs index 5d84168486..62db1ac49c 100644 --- a/src/ffi/io.rs +++ b/src/ffi/io.rs @@ -15,7 +15,7 @@ type hyper_io_read_callback = type hyper_io_write_callback = extern "C" fn(*mut c_void, *mut hyper_context<'_>, *const u8, size_t) -> size_t; -pub struct Io { +pub struct hyper_io { read: hyper_io_read_callback, write: hyper_io_write_callback, userdata: *mut c_void, @@ -26,8 +26,8 @@ ffi_fn! { /// /// The read and write functions of this transport should be set with /// `hyper_io_set_read` and `hyper_io_set_write`. 
- fn hyper_io_new() -> *mut Io { - Box::into_raw(Box::new(Io { + fn hyper_io_new() -> *mut hyper_io { + Box::into_raw(Box::new(hyper_io { read: read_noop, write: write_noop, userdata: std::ptr::null_mut(), @@ -40,7 +40,7 @@ ffi_fn! { /// /// This is typically only useful if you aren't going to pass ownership /// of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. - fn hyper_io_free(io: *mut Io) { + fn hyper_io_free(io: *mut hyper_io) { drop(unsafe { Box::from_raw(io) }); } } @@ -49,7 +49,7 @@ ffi_fn! { /// Set the user data pointer for this IO to some value. /// /// This value is passed as an argument to the read and write callbacks. - fn hyper_io_set_userdata(io: *mut Io, data: *mut c_void) { + fn hyper_io_set_userdata(io: *mut hyper_io, data: *mut c_void) { unsafe { &mut *io }.userdata = data; } } @@ -71,7 +71,7 @@ ffi_fn! { /// /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` /// should be the return value. - fn hyper_io_set_read(io: *mut Io, func: hyper_io_read_callback) { + fn hyper_io_set_read(io: *mut hyper_io, func: hyper_io_read_callback) { unsafe { &mut *io }.read = func; } } @@ -90,7 +90,7 @@ ffi_fn! { /// /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` /// should be the return value. - fn hyper_io_set_write(io: *mut Io, func: hyper_io_write_callback) { + fn hyper_io_set_write(io: *mut hyper_io, func: hyper_io_write_callback) { unsafe { &mut *io }.write = func; } } @@ -115,7 +115,7 @@ extern "C" fn write_noop( 0 } -impl AsyncRead for Io { +impl AsyncRead for hyper_io { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -141,7 +141,7 @@ impl AsyncRead for Io { } } -impl AsyncWrite for Io { +impl AsyncWrite for hyper_io { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -169,5 +169,5 @@ impl AsyncWrite for Io { } } -unsafe impl Send for Io {} -unsafe impl Sync for Io {} +unsafe impl Send for hyper_io {} +unsafe impl Sync for hyper_io {} diff --git a/src/ffi/task.rs b/src/ffi/task.rs index 61641bd193..86f870c2bd 100644 --- a/src/ffi/task.rs +++ b/src/ffi/task.rs @@ -21,7 +21,7 @@ pub const HYPER_POLL_READY: c_int = 0; pub const HYPER_POLL_PENDING: c_int = 1; pub const HYPER_POLL_ERROR: c_int = 3; -pub struct Exec { +pub struct hyper_executor { /// The executor of all task futures. /// /// There should never be contention on the mutex, as it is only locked @@ -38,23 +38,23 @@ pub struct Exec { spawn_queue: Mutex>, /// This is used to track when a future calls `wake` while we are within - /// `Exec::poll_next`. + /// `hyper_executor::poll_next`. 
is_woken: Arc, } #[derive(Clone)] -pub(crate) struct WeakExec(Weak); +pub(crate) struct WeakExec(Weak); struct ExecWaker(AtomicBool); -pub struct Task { +pub struct hyper_task { future: BoxFuture, output: Option, userdata: UserDataPointer, } struct TaskFuture { - task: Option>, + task: Option>, } pub struct hyper_context<'a>(Context<'a>); @@ -85,29 +85,29 @@ pub(crate) trait IntoDynTaskType { fn into_dyn_task_type(self) -> BoxAny; } -// ===== impl Exec ===== +// ===== impl hyper_executor ===== -impl Exec { - fn new() -> Arc { - Arc::new(Exec { +impl hyper_executor { + fn new() -> Arc { + Arc::new(hyper_executor { driver: Mutex::new(FuturesUnordered::new()), spawn_queue: Mutex::new(Vec::new()), is_woken: Arc::new(ExecWaker(AtomicBool::new(false))), }) } - pub(crate) fn downgrade(exec: &Arc) -> WeakExec { + pub(crate) fn downgrade(exec: &Arc) -> WeakExec { WeakExec(Arc::downgrade(exec)) } - fn spawn(&self, task: Box) { + fn spawn(&self, task: Box) { self.spawn_queue .lock() .unwrap() .push(TaskFuture { task: Some(task) }); } - fn poll_next(&self) -> Option> { + fn poll_next(&self) -> Option> { // Drain the queue first. self.drain_queue(); @@ -169,21 +169,21 @@ impl WeakExec { impl crate::rt::Executor> for WeakExec { fn execute(&self, fut: BoxFuture<()>) { if let Some(exec) = self.0.upgrade() { - exec.spawn(Task::boxed(fut)); + exec.spawn(hyper_task::boxed(fut)); } } } ffi_fn! { /// Creates a new task executor. - fn hyper_executor_new() -> *const Exec { - Arc::into_raw(Exec::new()) + fn hyper_executor_new() -> *const hyper_executor { + Arc::into_raw(hyper_executor::new()) } } ffi_fn! { /// Frees an executor and any incomplete tasks still part of it. - fn hyper_executor_free(exec: *const Exec) { + fn hyper_executor_free(exec: *const hyper_executor) { drop(unsafe { Arc::from_raw(exec) }); } } @@ -193,7 +193,7 @@ ffi_fn! { /// /// The executor takes ownership of the task, it should not be accessed /// again unless returned back to the user with `hyper_executor_poll`. - fn hyper_executor_push(exec: *const Exec, task: *mut Task) -> hyper_code { + fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code { if exec.is_null() || task.is_null() { return hyper_code::HYPERE_INVALID_ARG; } @@ -211,7 +211,7 @@ ffi_fn! { /// If ready, returns a task from the executor that has completed. /// /// If there are no ready tasks, this returns `NULL`. - fn hyper_executor_poll(exec: *const Exec) -> *mut Task { + fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task { // We only want an `&Arc` in here, so wrap in a `ManuallyDrop` so we // don't accidentally trigger a ref_dec of the Arc. let exec = unsafe { &*exec }; @@ -222,15 +222,15 @@ ffi_fn! { } } -// ===== impl Task ===== +// ===== impl hyper_task ===== -impl Task { - pub(crate) fn boxed(fut: F) -> Box +impl hyper_task { + pub(crate) fn boxed(fut: F) -> Box where F: Future + Send + 'static, F::Output: IntoDynTaskType + Send + Sync + 'static, { - Box::new(Task { + Box::new(hyper_task { future: Box::pin(async move { fut.await.into_dyn_task_type() }), output: None, userdata: UserDataPointer(ptr::null_mut()), @@ -246,7 +246,7 @@ impl Task { } impl Future for TaskFuture { - type Output = Box; + type Output = Box; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) { @@ -262,7 +262,7 @@ impl Future for TaskFuture { ffi_fn! { /// Free a task. 
- fn hyper_task_free(task: *mut Task) { + fn hyper_task_free(task: *mut hyper_task) { drop(unsafe { Box::from_raw(task) }); } } @@ -274,7 +274,7 @@ ffi_fn! { /// this task. /// /// Use `hyper_task_type` to determine the type of the `void *` return value. - fn hyper_task_value(task: *mut Task) -> *mut c_void { + fn hyper_task_value(task: *mut hyper_task) -> *mut c_void { if task.is_null() { return ptr::null_mut(); } @@ -297,7 +297,7 @@ ffi_fn! { ffi_fn! { /// Query the return type of this task. - fn hyper_task_type(task: *mut Task) -> hyper_task_return_type { + fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type { if task.is_null() { // instead of blowing up spectacularly, just say this null task // doesn't have a value to retrieve. @@ -313,7 +313,7 @@ ffi_fn! { /// /// This value will be passed to task callbacks, and can be checked later /// with `hyper_task_userdata`. - fn hyper_task_set_userdata(task: *mut Task, userdata: *mut c_void) { + fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) { if task.is_null() { return; } @@ -324,7 +324,7 @@ ffi_fn! { ffi_fn! { /// Retrieve the userdata that has been set via `hyper_task_set_userdata`. - fn hyper_task_userdata(task: *mut Task) -> *mut c_void { + fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void { if task.is_null() { return ptr::null_mut(); } From 4c946af49cc7fbbc6bd4894283a654625c2ea383 Mon Sep 17 00:00:00 2001 From: ty Date: Tue, 23 Feb 2021 08:18:38 +0800 Subject: [PATCH 037/420] feat(server): add `AddrIncoming::from_listener` constructor (#2439) --- src/server/tcp.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 91afc40120..16a663719c 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -36,6 +36,16 @@ impl AddrIncoming { .set_nonblocking(true) .map_err(crate::Error::new_listen)?; let listener = TcpListener::from_std(std_listener).map_err(crate::Error::new_listen)?; + AddrIncoming::from_listener(listener) + } + + /// Creates a new `AddrIncoming` binding to provided socket address. + pub fn bind(addr: &SocketAddr) -> crate::Result { + AddrIncoming::new(addr) + } + + /// Creates a new `AddrIncoming` from an existing `tokio::net::TcpListener`. + pub fn from_listener(listener: TcpListener) -> crate::Result { let addr = listener.local_addr().map_err(crate::Error::new_listen)?; Ok(AddrIncoming { listener, @@ -47,11 +57,6 @@ impl AddrIncoming { }) } - /// Creates a new `AddrIncoming` binding to provided socket address. - pub fn bind(addr: &SocketAddr) -> crate::Result { - AddrIncoming::new(addr) - } - /// Get the local address bound to this listener. 
pub fn local_addr(&self) -> SocketAddr { self.addr From f162ca2f2fd14681e11dd8b9ba8d1469b2b9271b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 26 Feb 2021 19:00:37 -0800 Subject: [PATCH 038/420] docs(ffi): generate FFI documentation (#2447) --- .github/workflows/CI.yml | 2 +- Cargo.toml | 4 ++-- src/ffi/body.rs | 2 ++ src/ffi/client.rs | 6 ++++++ src/ffi/error.rs | 2 ++ src/ffi/http_types.rs | 5 +++++ src/ffi/io.rs | 5 +++++ src/ffi/mod.rs | 38 ++++++++++++++++++++++++++++++++++++++ src/ffi/task.rs | 11 +++++++++++ src/lib.rs | 2 +- 10 files changed, 73 insertions(+), 4 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index d4926afe58..07d1ea04ca 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -179,4 +179,4 @@ jobs: uses: actions-rs/cargo@v1 with: command: rustdoc - args: --features full -- --cfg docsrs -D broken-intra-doc-links + args: --features full,ffi -- --cfg docsrs --cfg hyper_unstable_ffi -D broken-intra-doc-links diff --git a/Cargo.toml b/Cargo.toml index 6392b207cd..31ac0356f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,8 +118,8 @@ nightly = [] __internal_happy_eyeballs_tests = [] [package.metadata.docs.rs] -features = ["full"] -rustdoc-args = ["--cfg", "docsrs"] +features = ["ffi", "full"] +rustdoc-args = ["--cfg", "docsrs", "--cfg", "hyper_unstable_ffi"] [package.metadata.playground] features = ["full"] diff --git a/src/ffi/body.rs b/src/ffi/body.rs index e49d2dacc7..51e5c8eae9 100644 --- a/src/ffi/body.rs +++ b/src/ffi/body.rs @@ -10,8 +10,10 @@ use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType} use super::{UserDataPointer, HYPER_ITER_CONTINUE}; use crate::body::{Body, Bytes, HttpBody as _}; +/// A streaming HTTP body. pub struct hyper_body(pub(super) Body); +/// A buffer of bytes that is sent or received on a `hyper_body`. pub struct hyper_buf(pub(super) Bytes); pub(crate) struct UserBody { diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 05d49015a0..a64842b6a3 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -10,12 +10,18 @@ use super::http_types::{hyper_request, hyper_response}; use super::io::hyper_io; use super::task::{hyper_executor, hyper_task, hyper_task_return_type, AsTaskType, WeakExec}; +/// An options builder to configure an HTTP client connection. pub struct hyper_clientconn_options { builder: conn::Builder, /// Use a `Weak` to prevent cycles. exec: WeakExec, } +/// An HTTP client connection handle. +/// +/// These are used to send a request on a single connection. It's possible to +/// send multiple requests on a single connection, such as when HTTP/1 +/// keep-alive or HTTP/2 is used. pub struct hyper_clientconn { tx: conn::SendRequest, } diff --git a/src/ffi/error.rs b/src/ffi/error.rs index eb563a60d5..7b85407099 100644 --- a/src/ffi/error.rs +++ b/src/ffi/error.rs @@ -1,7 +1,9 @@ use libc::size_t; +/// A more detailed error object returned by some hyper functions. pub struct hyper_error(crate::Error); +/// A return code for many of hyper's methods. #[repr(C)] pub enum hyper_code { /// All is well. diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index 6dba5a494c..edb43ed31c 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -9,10 +9,15 @@ use super::HYPER_ITER_CONTINUE; use crate::header::{HeaderName, HeaderValue}; use crate::{Body, HeaderMap, Method, Request, Response, Uri}; +/// An HTTP request. pub struct hyper_request(pub(super) Request); +/// An HTTP response. 
pub struct hyper_response(pub(super) Response); +/// An HTTP header map. +/// +/// These can be part of a request or response. #[derive(Default)] pub struct hyper_headers { pub(super) headers: HeaderMap, diff --git a/src/ffi/io.rs b/src/ffi/io.rs index 62db1ac49c..f37b6cb781 100644 --- a/src/ffi/io.rs +++ b/src/ffi/io.rs @@ -7,7 +7,11 @@ use tokio::io::{AsyncRead, AsyncWrite}; use super::task::hyper_context; +/// Sentinal value to return from a read or write callback that the operation +/// is pending. pub const HYPER_IO_PENDING: size_t = 0xFFFFFFFF; +/// Sentinal value to return from a read or write callback that the operation +/// has errored. pub const HYPER_IO_ERROR: size_t = 0xFFFFFFFE; type hyper_io_read_callback = @@ -15,6 +19,7 @@ type hyper_io_read_callback = type hyper_io_write_callback = extern "C" fn(*mut c_void, *mut hyper_context<'_>, *const u8, size_t) -> size_t; +/// An IO object used to represent a socket or similar concept. pub struct hyper_io { read: hyper_io_read_callback, write: hyper_io_write_callback, diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index b593c89d77..39f3276bc8 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -1,8 +1,33 @@ // We have a lot of c-types in here, stop warning about their names! #![allow(non_camel_case_types)] +// fmt::Debug isn't helpful on FFI types +#![allow(missing_debug_implementations)] // unreachable_pub warns `#[no_mangle] pub extern fn` in private mod. #![allow(unreachable_pub)] +//! # hyper C API +//! +//! This part of the documentation describes the C API for hyper. That is, how +//! to *use* the hyper library in C code. This is **not** a regular Rust +//! module, and thus it is not accessible in Rust. +//! +//! ## Unstable +//! +//! The C API of hyper is currently **unstable**, which means it's not part of +//! the semver contract as the rest of the Rust API is. Because of that, it's +//! only accessible if `--cfg hyper_unstable_ffi` is passed to `rustc` when +//! compiling. The easiest way to do that is setting the `RUSTFLAGS` +//! environment variable. +//! +//! ## Building +//! +//! The C API is part of the Rust library, but isn't compiled by default. Using +//! `cargo`, it can be compiled with the following command: +//! +//! ```notrust +//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo build --features client,http1,http2,ffi +//! ``` + // We may eventually allow the FFI to be enabled without `client` or `http1`, // that is why we don't auto enable them as `ffi = ["client", "http1"]` in // the `Cargo.toml`. @@ -29,16 +54,29 @@ mod http_types; mod io; mod task; +pub use self::body::*; +pub use self::client::*; +pub use self::error::*; +pub use self::http_types::*; +pub use self::io::*; +pub use self::task::*; + pub(crate) use self::body::UserBody; pub(crate) use self::http_types::{HeaderCaseMap, ReasonPhrase}; +/// Return in iter functions to continue iterating. pub const HYPER_ITER_CONTINUE: libc::c_int = 0; +/// Return in iter functions to stop iterating. #[allow(unused)] pub const HYPER_ITER_BREAK: libc::c_int = 1; +/// An HTTP Version that is unspecified. pub const HYPER_HTTP_VERSION_NONE: libc::c_int = 0; +/// The HTTP/1.0 version. pub const HYPER_HTTP_VERSION_1_0: libc::c_int = 10; +/// The HTTP/1.1 version. pub const HYPER_HTTP_VERSION_1_1: libc::c_int = 11; +/// The HTTP/2 version. 
pub const HYPER_HTTP_VERSION_2: libc::c_int = 20; struct UserDataPointer(*mut std::ffi::c_void); diff --git a/src/ffi/task.rs b/src/ffi/task.rs index 86f870c2bd..b42be59808 100644 --- a/src/ffi/task.rs +++ b/src/ffi/task.rs @@ -17,10 +17,17 @@ use super::UserDataPointer; type BoxFuture = Pin + Send>>; type BoxAny = Box; +/// Return in a poll function to indicate it was ready. pub const HYPER_POLL_READY: c_int = 0; +/// Return in a poll function to indicate it is still pending. +/// +/// The passed in `hyper_waker` should be registered to wake up the task at +/// some later point. pub const HYPER_POLL_PENDING: c_int = 1; +/// Return in a poll function indicate an error. pub const HYPER_POLL_ERROR: c_int = 3; +/// A task executor for `hyper_task`s. pub struct hyper_executor { /// The executor of all task futures. /// @@ -47,6 +54,7 @@ pub(crate) struct WeakExec(Weak); struct ExecWaker(AtomicBool); +/// An async task. pub struct hyper_task { future: BoxFuture, output: Option, @@ -57,12 +65,15 @@ struct TaskFuture { task: Option>, } +/// An async context for a task that contains the related waker. pub struct hyper_context<'a>(Context<'a>); +/// A waker that is saved and used to waken a pending task. pub struct hyper_waker { waker: std::task::Waker, } +/// A descriptor for what type a `hyper_task` value is. #[repr(C)] pub enum hyper_task_return_type { /// The value of this task is null (does not imply an error). diff --git a/src/lib.rs b/src/lib.rs index d5fb494327..daddbdfb0a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -89,7 +89,7 @@ pub mod service; pub mod upgrade; #[cfg(feature = "ffi")] -mod ffi; +pub mod ffi; cfg_proto! { mod headers; From 8cbf9527dfb313b3f84fcd83260c5c72ce4a1beb Mon Sep 17 00:00:00 2001 From: Peter Smit Date: Fri, 5 Mar 2021 02:03:33 +0100 Subject: [PATCH 039/420] fix(server): skip automatic Content-Length headers when not allowed (#2216) Closes #2215 --- src/proto/h1/role.rs | 23 ++++++++++++++++------- tests/server.rs | 5 +++-- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index a9f2f0074f..b7310ba99d 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -562,13 +562,13 @@ impl Http1Transaction for Server { } } None | Some(BodyLength::Known(0)) => { - if msg.head.subject != StatusCode::NOT_MODIFIED { + if Server::can_have_content_length(msg.req_method, msg.head.subject) { extend(dst, b"content-length: 0\r\n"); } Encoder::length(0) } Some(BodyLength::Known(len)) => { - if msg.head.subject == StatusCode::NOT_MODIFIED { + if !Server::can_have_content_length(msg.req_method, msg.head.subject) { Encoder::length(0) } else { extend(dst, b"content-length: "); @@ -638,13 +638,22 @@ impl Server { if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success() { false + } else if status.is_informational() { + false + } else { + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, + _ => true, + } + } + } + + fn can_have_content_length(method: &Option, status: StatusCode) -> bool { + if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() { + false } else { match status { - // TODO: support for 1xx codes needs improvement everywhere - // would be 100...199 => false - StatusCode::SWITCHING_PROTOCOLS - | StatusCode::NO_CONTENT - | StatusCode::NOT_MODIFIED => false, + StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, _ => true, } } diff --git a/tests/server.rs b/tests/server.rs index 72d2e459d8..662e903d57 100644 --- 
a/tests/server.rs +++ b/tests/server.rs @@ -1330,8 +1330,9 @@ async fn upgrades_new() { let mut buf = [0; 256]; tcp.read(&mut buf).expect("read 1"); - let expected = "HTTP/1.1 101 Switching Protocols\r\n"; - assert_eq!(s(&buf[..expected.len()]), expected); + let response = s(&buf); + assert!(response.starts_with("HTTP/1.1 101 Switching Protocols\r\n")); + assert!(!has_header(&response, "content-length")); let _ = read_101_tx.send(()); let n = tcp.read(&mut buf).expect("read 2"); From 34085afef66b453dfee1c561e6683546bf4600c8 Mon Sep 17 00:00:00 2001 From: 0x79756b69 <76654022+0x79756b69@users.noreply.github.com> Date: Tue, 9 Mar 2021 07:19:24 +0900 Subject: [PATCH 040/420] docs(examples): use hyper v0.14 and full feature (#2451) --- examples/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/README.md b/examples/README.md index 7071bd7b70..45801d60ad 100644 --- a/examples/README.md +++ b/examples/README.md @@ -10,7 +10,7 @@ Most of these examples use these dependencies: ```toml [dependencies] -hyper = "0.13" +hyper = { version = "0.14", features = ["full"] } tokio = { version = "0.2", features = ["full"] } pretty_env_logger = "0.4" ``` From 297a068454115e8d5426f9051398d851f69235bf Mon Sep 17 00:00:00 2001 From: kolapapa <346512016@qq.com> Date: Thu, 11 Mar 2021 02:00:39 +0800 Subject: [PATCH 041/420] docs(examples): upgrade tokio version (#2456) --- examples/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/README.md b/examples/README.md index 45801d60ad..2d08936c1c 100644 --- a/examples/README.md +++ b/examples/README.md @@ -11,7 +11,7 @@ Most of these examples use these dependencies: ```toml [dependencies] hyper = { version = "0.14", features = ["full"] } -tokio = { version = "0.2", features = ["full"] } +tokio = { version = "1", features = ["full"] } pretty_env_logger = "0.4" ``` From eb0e7186963bbe4fa1ad4478ac493f75ddc92ae5 Mon Sep 17 00:00:00 2001 From: Danilo Bargen Date: Mon, 15 Mar 2021 23:40:28 +0100 Subject: [PATCH 042/420] docs(body): add links to to_bytes and aggregate (#2464) Since these two functions are not methods on the `Body`, they aren't very discoverable. So a note in the docs would definitely be helpful. --- src/body/body.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/body/body.rs b/src/body/body.rs index 1be2c1b0c2..a1ac6ad3b6 100644 --- a/src/body/body.rs +++ b/src/body/body.rs @@ -29,6 +29,9 @@ type TrailersSender = oneshot::Sender; /// /// A good default [`HttpBody`](crate::body::HttpBody) to use in many /// applications. +/// +/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes) +/// or [`body::aggregate`](crate::body::aggregate). #[must_use = "streams do nothing unless polled"] pub struct Body { kind: Kind, From 48fdaf160689f333e9bb63388d0b1d0fa29a1391 Mon Sep 17 00:00:00 2001 From: Vincent Ricard Date: Fri, 19 Mar 2021 18:38:58 +0100 Subject: [PATCH 043/420] fix(headers): Support multiple Content-Length values on same line (#2471) Closes #2470 --- src/headers.rs | 38 +++++++++++++++++++------------------- tests/client.rs | 24 ++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 19 deletions(-) diff --git a/src/headers.rs b/src/headers.rs index 7fc486a80c..0d16cdfd5a 100644 --- a/src/headers.rs +++ b/src/headers.rs @@ -42,26 +42,26 @@ pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue> // be alright if they all contain the same value, and all parse // correctly. If not, then it's an error. 
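    // For example (mirroring the `client_handles_contentlength_values_on_same_line`
    // test added below in this patch), the header block
    //
    //     Content-Length: 3,3
    //     Content-Length: 3,3
    //
    // parses to `Some(3)` because every listed value agrees, while a mismatch
    // such as `3,4`, or any value that fails to parse, makes the function
    // return `None` so the message can be rejected.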
- let folded = values.fold(None, |prev, line| match prev { - Some(Ok(prev)) => Some( - line.to_str() - .map_err(|_| ()) - .and_then(|s| s.parse().map_err(|_| ())) - .and_then(|n| if prev == n { Ok(n) } else { Err(()) }), - ), - None => Some( - line.to_str() - .map_err(|_| ()) - .and_then(|s| s.parse().map_err(|_| ())), - ), - Some(Err(())) => Some(Err(())), - }); - - if let Some(Ok(n)) = folded { - Some(n) - } else { - None + let mut content_length: Option = None; + for h in values { + if let Ok(line) = h.to_str() { + for v in line.split(',') { + if let Some(n) = v.trim().parse().ok() { + if content_length.is_none() { + content_length = Some(n) + } else if content_length != Some(n) { + return None; + } + } else { + return None + } + } + } else { + return None + } } + + return content_length } #[cfg(all(feature = "http2", feature = "client"))] diff --git a/tests/client.rs b/tests/client.rs index 479500ab3e..0585695a80 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -1043,6 +1043,30 @@ test! { error: |err| err.to_string() == "request has unsupported HTTP version", } +test! { + name: client_handles_contentlength_values_on_same_line, + + server: + expected: "GET /foo HTTP/1.1\r\nhost: {addr}\r\n\r\n", + reply: "\ + HTTP/1.1 200 OK\r\n\ + Content-Length: 3,3\r\n\ + Content-Length: 3,3\r\n\ + \r\n\ + abc\r\n", + + client: + request: { + method: GET, + url: "http://{addr}/foo", + }, + response: + status: OK, + headers: { + }, + body: &b"abc"[..], +} + mod dispatch_impl { use super::*; use std::io::{self, Read, Write}; From 41f99578a53845e5e0bb999c101bef8307e1ce5f Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 22 Mar 2021 22:16:31 +0000 Subject: [PATCH 044/420] refactor(dependencies): update to socket2 v0.4.0 (#2472) --- Cargo.toml | 2 +- src/client/connect/http.rs | 12 +++++------ src/server/tcp.rs | 41 +++----------------------------------- 3 files changed, 9 insertions(+), 46 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 31ac0356f5..10a63999bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,7 @@ want = "0.3" # Optional libc = { version = "0.2", optional = true } -socket2 = { version = "0.3.16", optional = true } +socket2 = { version = "0.4", optional = true } [dev-dependencies] futures-util = { version = "0.3", default-features = false, features = ["alloc"] } diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs index 734aea188a..4437c86380 100644 --- a/src/client/connect/http.rs +++ b/src/client/connect/http.rs @@ -584,14 +584,11 @@ fn connect( // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the // keepalive timeout, it would be nice to use that instead of socket2, // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance... 
- use socket2::{Domain, Protocol, Socket, Type}; + use socket2::{Domain, Protocol, Socket, TcpKeepalive, Type}; use std::convert::TryInto; - let domain = match *addr { - SocketAddr::V4(_) => Domain::ipv4(), - SocketAddr::V6(_) => Domain::ipv6(), - }; - let socket = Socket::new(domain, Type::stream(), Some(Protocol::tcp())) + let domain = Domain::for_address(*addr); + let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) .map_err(ConnectError::m("tcp open error"))?; // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is @@ -601,7 +598,8 @@ fn connect( .map_err(ConnectError::m("tcp set_nonblocking error"))?; if let Some(dur) = config.keep_alive_timeout { - if let Err(e) = socket.set_keepalive(Some(dur)) { + let conf = TcpKeepalive::new().with_time(dur); + if let Err(e) = socket.set_tcp_keepalive(&conf) { warn!("tcp set_keepalive error: {}", e); } } diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 16a663719c..7b2f68b3a9 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -108,46 +108,11 @@ impl AddrIncoming { match ready!(self.listener.poll_accept(cx)) { Ok((socket, addr)) => { if let Some(dur) = self.tcp_keepalive_timeout { - // Convert the Tokio `TcpStream` into a `socket2` socket - // so we can call `set_keepalive`. - // TODO(eliza): if Tokio's `TcpSocket` API grows a few - // more methods in the future, hopefully we shouldn't - // have to do the `from_raw_fd` dance any longer... - #[cfg(unix)] - let socket = unsafe { - // Safety: `socket2`'s socket will try to close the - // underlying fd when it's dropped. However, we - // can't take ownership of the fd from the tokio - // TcpStream, so instead we will call `into_raw_fd` - // on the socket2 socket before dropping it. This - // prevents it from trying to close the fd. - use std::os::unix::io::{AsRawFd, FromRawFd}; - socket2::Socket::from_raw_fd(socket.as_raw_fd()) - }; - #[cfg(windows)] - let socket = unsafe { - // Safety: `socket2`'s socket will try to close the - // underlying SOCKET when it's dropped. However, we - // can't take ownership of the SOCKET from the tokio - // TcpStream, so instead we will call `into_raw_socket` - // on the socket2 socket before dropping it. This - // prevents it from trying to close the SOCKET. - use std::os::windows::io::{AsRawSocket, FromRawSocket}; - socket2::Socket::from_raw_socket(socket.as_raw_socket()) - }; - - // Actually set the TCP keepalive timeout. - if let Err(e) = socket.set_keepalive(Some(dur)) { + let socket = socket2::SockRef::from(&socket); + let conf = socket2::TcpKeepalive::new().with_time(dur); + if let Err(e) = socket.set_tcp_keepalive(&conf) { trace!("error trying to set TCP keepalive: {}", e); } - - // Take ownershop of the fd/socket back from the socket2 - // `Socket`, so that socket2 doesn't try to close it - // when it's dropped. 
- #[cfg(unix)] - drop(std::os::unix::io::IntoRawFd::into_raw_fd(socket)); - #[cfg(windows)] - drop(std::os::windows::io::IntoRawSocket::into_raw_socket(socket)); } if let Err(e) = socket.set_nodelay(self.tcp_nodelay) { trace!("error trying to set TCP nodelay: {}", e); From 51ed71b0a6e1d90d324f87151434334bab6fb837 Mon Sep 17 00:00:00 2001 From: Vagelis Prokopiou Date: Thu, 25 Mar 2021 22:34:03 +0200 Subject: [PATCH 045/420] docs(client): use Method::POST to match the example in https://hyper.rs/guides/client/advanced/ (#2479) --- src/client/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index 10cbc37b63..37a3251a7a 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -141,12 +141,12 @@ where /// ``` /// # #[cfg(feature = "runtime")] /// # fn run () { - /// use hyper::{Body, Client, Request}; + /// use hyper::{Body, Method, Client, Request}; /// /// let client = Client::new(); /// /// let req = Request::builder() - /// .method("POST") + /// .method(Method::POST) /// .uri("http://httpbin.org/post") /// .body(Body::from("Hallo!")) /// .expect("request builder"); From 68d4e4a3db91fb43f41a8c4fce1175ddb56816af Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Fri, 26 Mar 2021 19:25:00 +0100 Subject: [PATCH 046/420] feat(client): allow HTTP/0.9 responses behind a flag (#2473) Fixes #2468 --- src/client/client.rs | 8 ++++++ src/client/conn.rs | 10 +++++++ src/lib.rs | 1 - src/proto/h1/conn.rs | 11 ++++++++ src/proto/h1/io.rs | 2 ++ src/proto/h1/mod.rs | 1 + src/proto/h1/role.rs | 63 ++++++++++++++++++++++++++++++++++++++++-- tests/client.rs | 66 ++++++++++++++++++++++++++++++++++++++++++++ 8 files changed, 158 insertions(+), 4 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index 37a3251a7a..418f3fb4e9 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -972,6 +972,14 @@ impl Builder { self } + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + pub fn http09_responses(&mut self, val: bool) -> &mut Self { + self.conn_builder.h09_responses(val); + self + } + /// Set whether the connection **must** use HTTP/2. 
/// /// The destination must either allow HTTP2 Prior Knowledge, or the diff --git a/src/client/conn.rs b/src/client/conn.rs index 2da083db16..b87600d85a 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -122,6 +122,7 @@ where #[derive(Clone, Debug)] pub struct Builder { pub(super) exec: Exec, + h09_responses: bool, h1_title_case_headers: bool, h1_read_buf_exact_size: Option, h1_max_buf_size: Option, @@ -493,6 +494,7 @@ impl Builder { pub fn new() -> Builder { Builder { exec: Exec::Default, + h09_responses: false, h1_read_buf_exact_size: None, h1_title_case_headers: false, h1_max_buf_size: None, @@ -514,6 +516,11 @@ impl Builder { self } + pub(super) fn h09_responses(&mut self, enabled: bool) -> &mut Builder { + self.h09_responses = enabled; + self + } + pub(super) fn h1_title_case_headers(&mut self, enabled: bool) -> &mut Builder { self.h1_title_case_headers = enabled; self @@ -700,6 +707,9 @@ impl Builder { if opts.h1_title_case_headers { conn.set_title_case_headers(); } + if opts.h09_responses { + conn.set_h09_responses(); + } if let Some(sz) = opts.h1_read_buf_exact_size { conn.set_read_buf_exact_size(sz); } diff --git a/src/lib.rs b/src/lib.rs index daddbdfb0a..059f8821c6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -#![doc(html_root_url = "https://docs.rs/hyper/0.14.4")] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index 174a1d8695..ce0848ddea 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -47,6 +47,7 @@ where #[cfg(feature = "ffi")] preserve_header_case: false, title_case_headers: false, + h09_responses: false, notify_read: false, reading: Reading::Init, writing: Writing::Init, @@ -78,6 +79,11 @@ where self.state.title_case_headers = true; } + #[cfg(feature = "client")] + pub(crate) fn set_h09_responses(&mut self) { + self.state.h09_responses = true; + } + #[cfg(feature = "server")] pub(crate) fn set_allow_half_close(&mut self) { self.state.allow_half_close = true; @@ -146,6 +152,7 @@ where req_method: &mut self.state.method, #[cfg(feature = "ffi")] preserve_header_case: self.state.preserve_header_case, + h09_responses: self.state.h09_responses, } )) { Ok(msg) => msg, @@ -157,6 +164,9 @@ where debug!("incoming body is {}", msg.decode); + // Prevent accepting HTTP/0.9 responses after the initial one, if any. + self.state.h09_responses = false; + self.state.busy(); self.state.keep_alive &= msg.keep_alive; self.state.version = msg.head.version; @@ -753,6 +763,7 @@ struct State { #[cfg(feature = "ffi")] preserve_header_case: bool, title_case_headers: bool, + h09_responses: bool, /// Set to true when the Dispatcher should poll read operations /// again. See the `maybe_notify` method for more. notify_read: bool, diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 5536b5d164..c7ce48664b 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -161,6 +161,7 @@ where req_method: parse_ctx.req_method, #[cfg(feature = "ffi")] preserve_header_case: parse_ctx.preserve_header_case, + h09_responses: parse_ctx.h09_responses, }, )? 
{ Some(msg) => { @@ -640,6 +641,7 @@ mod tests { req_method: &mut None, #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }; assert!(buffered .parse::(cx, parse_ctx) diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs index 1498872ea8..01a9253fa3 100644 --- a/src/proto/h1/mod.rs +++ b/src/proto/h1/mod.rs @@ -72,6 +72,7 @@ pub(crate) struct ParseContext<'a> { req_method: &'a mut Option, #[cfg(feature = "ffi")] preserve_header_case: bool, + h09_responses: bool, } /// Passed to Http1Transaction::encode diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index b7310ba99d..ea9dc96be1 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -683,8 +683,8 @@ impl Http1Transaction for Client { ); let mut res = httparse::Response::new(&mut headers); let bytes = buf.as_ref(); - match res.parse(bytes)? { - httparse::Status::Complete(len) => { + match res.parse(bytes) { + Ok(httparse::Status::Complete(len)) => { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; @@ -710,7 +710,18 @@ impl Http1Transaction for Client { let headers_len = res.headers.len(); (len, status, reason, version, headers_len) } - httparse::Status::Partial => return Ok(None), + Ok(httparse::Status::Partial) => return Ok(None), + Err(httparse::Error::Version) if ctx.h09_responses => { + trace!("Response.parse accepted HTTP/0.9 response"); + + #[cfg(not(feature = "ffi"))] + let reason = (); + #[cfg(feature = "ffi")] + let reason = None; + + (0, StatusCode::OK, reason, Version::HTTP_09, 0) + } + Err(e) => return Err(e.into()), } }; @@ -1222,6 +1233,7 @@ mod tests { req_method: &mut method, #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }, ) .unwrap() @@ -1244,6 +1256,7 @@ mod tests { req_method: &mut Some(crate::Method::GET), #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw.len(), 0); @@ -1261,10 +1274,46 @@ mod tests { req_method: &mut None, #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }; Server::parse(&mut raw, ctx).unwrap_err(); } + const H09_RESPONSE: &'static str = "Baguettes are super delicious, don't you agree?"; + + #[test] + fn test_parse_response_h09_allowed() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(H09_RESPONSE); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + #[cfg(feature = "ffi")] + preserve_header_case: false, + h09_responses: true, + }; + let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); + assert_eq!(raw, H09_RESPONSE); + assert_eq!(msg.head.subject, crate::StatusCode::OK); + assert_eq!(msg.head.version, crate::Version::HTTP_09); + assert_eq!(msg.head.headers.len(), 0); + } + + #[test] + fn test_parse_response_h09_rejected() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(H09_RESPONSE); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + #[cfg(feature = "ffi")] + preserve_header_case: false, + h09_responses: false, + }; + Client::parse(&mut raw, ctx).unwrap_err(); + assert_eq!(raw, H09_RESPONSE); + } + #[test] fn test_decoder_request() { fn parse(s: &str) -> ParsedMessage { @@ -1276,6 +1325,7 @@ mod tests { req_method: &mut None, #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }, ) .expect("parse ok") @@ -1291,6 +1341,7 @@ mod tests { req_method: &mut None, 
#[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }, ) .expect_err(comment) @@ -1505,6 +1556,7 @@ mod tests { req_method: &mut Some(Method::GET), #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, } ) .expect("parse ok") @@ -1520,6 +1572,7 @@ mod tests { req_method: &mut Some(m), #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }, ) .expect("parse ok") @@ -1535,6 +1588,7 @@ mod tests { req_method: &mut Some(Method::GET), #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }, ) .expect_err("parse should err") @@ -1850,6 +1904,7 @@ mod tests { req_method: &mut Some(Method::GET), #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }, ) .expect("parse ok") @@ -1931,6 +1986,7 @@ mod tests { req_method: &mut None, #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }, ) .unwrap() @@ -1966,6 +2022,7 @@ mod tests { req_method: &mut None, #[cfg(feature = "ffi")] preserve_header_case: false, + h09_responses: false, }, ) .unwrap() diff --git a/tests/client.rs b/tests/client.rs index 0585695a80..d22f8cf2ca 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -112,6 +112,43 @@ macro_rules! test { headers: { $($response_header_name:expr => $response_header_val:expr,)* }, body: $response_body:expr, ) => ( + test! { + name: $name, + server: + expected: $server_expected, + reply: $server_reply, + client: + set_host: $set_host, + title_case_headers: $title_case_headers, + allow_h09_responses: false, + request: {$( + $c_req_prop: $c_req_val, + )*}, + + response: + status: $client_status, + headers: { $($response_header_name => $response_header_val,)* }, + body: $response_body, + } + ); + ( + name: $name:ident, + server: + expected: $server_expected:expr, + reply: $server_reply:expr, + client: + set_host: $set_host:expr, + title_case_headers: $title_case_headers:expr, + allow_h09_responses: $allow_h09_responses:expr, + request: {$( + $c_req_prop:ident: $c_req_val:tt, + )*}, + + response: + status: $client_status:ident, + headers: { $($response_header_name:expr => $response_header_val:expr,)* }, + body: $response_body:expr, + ) => ( #[test] fn $name() { let _ = pretty_env_logger::try_init(); @@ -127,6 +164,7 @@ macro_rules! test { client: set_host: $set_host, title_case_headers: $title_case_headers, + allow_h09_responses: $allow_h09_responses, request: {$( $c_req_prop: $c_req_val, )*}, @@ -181,6 +219,7 @@ macro_rules! test { client: set_host: true, title_case_headers: false, + allow_h09_responses: false, request: {$( $c_req_prop: $c_req_val, )*}, @@ -205,6 +244,7 @@ macro_rules! test { client: set_host: $set_host:expr, title_case_headers: $title_case_headers:expr, + allow_h09_responses: $allow_h09_responses:expr, request: {$( $c_req_prop:ident: $c_req_val:tt, )*}, @@ -217,6 +257,7 @@ macro_rules! test { let client = Client::builder() .set_host($set_host) .http1_title_case_headers($title_case_headers) + .http09_responses($allow_h09_responses) .build(connector); #[allow(unused_assignments, unused_mut)] @@ -1067,6 +1108,31 @@ test! { body: &b"abc"[..], } +test! 
{ + name: client_allows_http09_when_requested, + + server: + expected: "\ + GET / HTTP/1.1\r\n\ + Host: {addr}\r\n\ + \r\n\ + ", + reply: "Mmmmh, baguettes.", + + client: + set_host: true, + title_case_headers: true, + allow_h09_responses: true, + request: { + method: GET, + url: "http://{addr}/", + }, + response: + status: OK, + headers: {}, + body: &b"Mmmmh, baguettes."[..], +} + mod dispatch_impl { use super::*; use std::io::{self, Read, Write}; From 895e4cf3fbb4119daac06ff446c47ac478468c97 Mon Sep 17 00:00:00 2001 From: Rasmus Nylander <52582625+nylanderdev@users.noreply.github.com> Date: Fri, 26 Mar 2021 19:26:44 +0100 Subject: [PATCH 047/420] refactor(ffi): return null ptr instead of aborting in C API (#2478) Make C API functions that return pointers return null in case of a panic, instead of aborting. Add ffi_fn! macro rules that enable default error values to be returned by writing "?= " after an ffi function's body. --- src/ffi/body.rs | 10 +++++----- src/ffi/client.rs | 6 +++--- src/ffi/http_types.rs | 10 +++++----- src/ffi/io.rs | 2 +- src/ffi/macros.rs | 18 +++++++++++++----- src/ffi/mod.rs | 2 +- src/ffi/task.rs | 10 +++++----- 7 files changed, 33 insertions(+), 25 deletions(-) diff --git a/src/ffi/body.rs b/src/ffi/body.rs index 51e5c8eae9..f942b769e5 100644 --- a/src/ffi/body.rs +++ b/src/ffi/body.rs @@ -34,7 +34,7 @@ ffi_fn! { /// If not configured, this body acts as an empty payload. fn hyper_body_new() -> *mut hyper_body { Box::into_raw(Box::new(hyper_body(Body::empty()))) - } + } ?= ptr::null_mut() } ffi_fn! { @@ -66,7 +66,7 @@ ffi_fn! { Box::into_raw(hyper_task::boxed(async move { body.0.data().await.map(|res| res.map(hyper_buf)) })) - } + } ?= ptr::null_mut() } ffi_fn! { @@ -97,7 +97,7 @@ ffi_fn! { } Ok(()) })) - } + } ?= ptr::null_mut() } ffi_fn! { @@ -198,7 +198,7 @@ ffi_fn! { std::slice::from_raw_parts(buf, len) }; Box::into_raw(Box::new(hyper_buf(Bytes::copy_from_slice(slice)))) - } + } ?= ptr::null_mut() } ffi_fn! { @@ -211,7 +211,7 @@ ffi_fn! { /// consumed/freed. fn hyper_buf_bytes(buf: *const hyper_buf) -> *const u8 { unsafe { (*buf).0.as_ptr() } - } + } ?= ptr::null() } ffi_fn! { diff --git a/src/ffi/client.rs b/src/ffi/client.rs index a64842b6a3..0351214e09 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -57,7 +57,7 @@ ffi_fn! { hyper_clientconn { tx } }) })) - } + } ?= std::ptr::null_mut() } ffi_fn! { @@ -85,7 +85,7 @@ ffi_fn! { }; Box::into_raw(hyper_task::boxed(fut)) - } + } ?= std::ptr::null_mut() } ffi_fn! { @@ -110,7 +110,7 @@ ffi_fn! { builder: conn::Builder::new(), exec: WeakExec::new(), })) - } + } ?= std::ptr::null_mut() } ffi_fn! { diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index edb43ed31c..1fce28902a 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -37,7 +37,7 @@ ffi_fn! { /// Construct a new HTTP request. fn hyper_request_new() -> *mut hyper_request { Box::into_raw(Box::new(hyper_request(Request::new(Body::empty())))) - } + } ?= std::ptr::null_mut() } ffi_fn! { @@ -114,7 +114,7 @@ ffi_fn! { /// `hyper_request` has been consumed. fn hyper_request_headers(req: *mut hyper_request) -> *mut hyper_headers { hyper_headers::get_or_default(unsafe { &mut *req }.0.extensions_mut()) - } + } ?= std::ptr::null_mut() } ffi_fn! { @@ -170,7 +170,7 @@ ffi_fn! { /// buffer. fn hyper_response_reason_phrase(resp: *const hyper_response) -> *const u8 { unsafe { &*resp }.reason_phrase().as_ptr() - } + } ?= std::ptr::null() } ffi_fn! { @@ -210,7 +210,7 @@ ffi_fn! { /// `hyper_response` has been freed. 
fn hyper_response_headers(resp: *mut hyper_response) -> *mut hyper_headers { hyper_headers::get_or_default(unsafe { &mut *resp }.0.extensions_mut()) - } + } ?= std::ptr::null_mut() } ffi_fn! { @@ -220,7 +220,7 @@ ffi_fn! { fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body { let body = std::mem::take(unsafe { &mut *resp }.0.body_mut()); Box::into_raw(Box::new(hyper_body(body))) - } + } ?= std::ptr::null_mut() } impl hyper_response { diff --git a/src/ffi/io.rs b/src/ffi/io.rs index f37b6cb781..0d4a140bde 100644 --- a/src/ffi/io.rs +++ b/src/ffi/io.rs @@ -37,7 +37,7 @@ ffi_fn! { write: write_noop, userdata: std::ptr::null_mut(), })) - } + } ?= std::ptr::null_mut() } ffi_fn! { diff --git a/src/ffi/macros.rs b/src/ffi/macros.rs index f4e031a07d..12064d41a2 100644 --- a/src/ffi/macros.rs +++ b/src/ffi/macros.rs @@ -1,5 +1,5 @@ macro_rules! ffi_fn { - ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block) => { + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block ?= $default:expr) => { $(#[$doc])* #[no_mangle] pub extern fn $name($($arg: $arg_ty),*) -> $ret { @@ -8,15 +8,23 @@ macro_rules! ffi_fn { match panic::catch_unwind(AssertUnwindSafe(move || $body)) { Ok(v) => v, Err(_) => { - // TODO: We shouldn't abort, but rather figure out how to - // convert into the return type that the function errored. - eprintln!("panic unwind caught, aborting"); - std::process::abort(); + $default } } } }; + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> $ret $body ?= { + eprintln!("panic unwind caught, aborting"); + std::process::abort() + }); + }; + + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block ?= $default:expr) => { + ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body ?= $default); + }; + ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block) => { ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body); }; diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index 39f3276bc8..3d2c7a8df4 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -92,5 +92,5 @@ ffi_fn! { /// Returns a static ASCII (null terminated) string of the hyper version. fn hyper_version() -> *const libc::c_char { VERSION_CSTR.as_ptr() as _ - } + } ?= std::ptr::null() } diff --git a/src/ffi/task.rs b/src/ffi/task.rs index b42be59808..e3fb5f44e8 100644 --- a/src/ffi/task.rs +++ b/src/ffi/task.rs @@ -189,7 +189,7 @@ ffi_fn! { /// Creates a new task executor. fn hyper_executor_new() -> *const hyper_executor { Arc::into_raw(hyper_executor::new()) - } + } ?= ptr::null() } ffi_fn! { @@ -230,7 +230,7 @@ ffi_fn! { Some(task) => Box::into_raw(task), None => ptr::null_mut(), } - } + } ?= ptr::null_mut() } // ===== impl hyper_task ===== @@ -303,7 +303,7 @@ ffi_fn! { } else { ptr::null_mut() } - } + } ?= ptr::null_mut() } ffi_fn! { @@ -341,7 +341,7 @@ ffi_fn! { } unsafe { &*task }.userdata.0 - } + } ?= ptr::null_mut() } // ===== impl AsTaskType ===== @@ -405,7 +405,7 @@ ffi_fn! 
{ fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker { let waker = unsafe { &mut *cx }.0.waker().clone(); Box::into_raw(Box::new(hyper_waker { waker })) - } + } ?= ptr::null_mut() } // ===== impl hyper_waker ===== From 98e7e0bd15642cf9e4bf07b3b03d8b4e538623ba Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 26 Mar 2021 11:29:20 -0700 Subject: [PATCH 048/420] v0.14.5 --- CHANGELOG.md | 16 ++++++++++++++++ Cargo.toml | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ad0f31e01e..f52fb9f388 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +### v0.14.5 (2021-03-26) + + +#### Bug Fixes + +* **client:** omit default port from automatic Host headers (#2441) ([0b11eee9](https://github.com/hyperium/hyper/commit/0b11eee9bde421cdc1680cadabfd38c5aff8e62f)) +* **headers:** Support multiple Content-Length values on same line (#2471) ([48fdaf16](https://github.com/hyperium/hyper/commit/48fdaf160689f333e9bb63388d0b1d0fa29a1391), closes [#2470](https://github.com/hyperium/hyper/issues/2470)) +* **server:** skip automatic Content-Length headers when not allowed (#2216) ([8cbf9527](https://github.com/hyperium/hyper/commit/8cbf9527dfb313b3f84fcd83260c5c72ce4a1beb), closes [#2215](https://github.com/hyperium/hyper/issues/2215)) + + +#### Features + +* **client:** allow HTTP/0.9 responses behind a flag (#2473) ([68d4e4a3](https://github.com/hyperium/hyper/commit/68d4e4a3db91fb43f41a8c4fce1175ddb56816af), closes [#2468](https://github.com/hyperium/hyper/issues/2468)) +* **server:** add `AddrIncoming::from_listener` constructor (#2439) ([4c946af4](https://github.com/hyperium/hyper/commit/4c946af49cc7fbbc6bd4894283a654625c2ea383)) + + ### v0.14.4 (2021-02-05) diff --git a/Cargo.toml b/Cargo.toml index 10a63999bc..26afc95d8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.4" # don't forget to update html_root_url +version = "0.14.5" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" From 96f7222435bc484d203a56ba4c9899690fada5b7 Mon Sep 17 00:00:00 2001 From: pyfisch Date: Tue, 6 Apr 2021 19:43:01 +0200 Subject: [PATCH 049/420] chore(dependencies): Update httpdate to v1.0 (#2485) Previous version would return wrong results for `Ord`. --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 26afc95d8f..02d6d333c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,7 @@ futures-channel = "0.3" futures-util = { version = "0.3", default-features = false } http = "0.2" http-body = "0.4" -httpdate = "0.3" +httpdate = "1.0" httparse = "1.0" h2 = { version = "0.3", optional = true } itoa = "0.4.1" From d84c794bade429790b93b9765158d195a29eb403 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 6 Apr 2021 17:46:21 +0000 Subject: [PATCH 050/420] chore(ci): Increase MSRV to 1.46 (#2486) Socket2 v0.4 requires Rust 1.46. 
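As a concrete illustration of the HTTP/0.9 opt-in listed in the v0.14.5 changelog above (#2473): the sketch below is not part of the patch series; it assumes hyper 0.14 with the `full` feature, a tokio runtime with the `macros` feature, and a hypothetical local address.

```rust
use hyper::{Body, Client};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // HTTP/0.9 responses are rejected by default; the builder flag added
    // in #2473 tolerates one on a fresh connection.
    let client = Client::builder()
        .http09_responses(true)
        .build_http::<Body>();

    // Hypothetical address, for illustration only.
    let resp = client.get("http://127.0.0.1:3000/".parse()?).await?;
    println!("status: {}", resp.status());
    Ok(())
}
```

As the `conn.rs` change above notes, only the initial response on a connection may be HTTP/0.9; later responses must be regular HTTP/1 responses.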
--- .github/workflows/CI.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 07d1ea04ca..ef8bcefda4 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -51,7 +51,7 @@ jobs: - stable - beta - nightly - - 1.45.2 + - 1.46 os: - ubuntu-latest @@ -66,6 +66,8 @@ jobs: - rust: nightly features: "--features full,nightly" benches: true + - rust: 1.46 + features: "--features full" runs-on: ${{ matrix.os }} From c7ab1aace102688ca3ad1bba6b5a7c5fd93b21b6 Mon Sep 17 00:00:00 2001 From: Kevin Burke Date: Tue, 6 Apr 2021 11:40:02 -0700 Subject: [PATCH 051/420] docs(ffi): spelling error (#2489) --- src/ffi/io.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ffi/io.rs b/src/ffi/io.rs index 0d4a140bde..7fb4538815 100644 --- a/src/ffi/io.rs +++ b/src/ffi/io.rs @@ -7,10 +7,10 @@ use tokio::io::{AsyncRead, AsyncWrite}; use super::task::hyper_context; -/// Sentinal value to return from a read or write callback that the operation +/// Sentinel value to return from a read or write callback that the operation /// is pending. pub const HYPER_IO_PENDING: size_t = 0xFFFFFFFF; -/// Sentinal value to return from a read or write callback that the operation +/// Sentinel value to return from a read or write callback that the operation /// has errored. pub const HYPER_IO_ERROR: size_t = 0xFFFFFFFE; From a5464f761adb08e905948d0b4e67d05e68cd7229 Mon Sep 17 00:00:00 2001 From: Kevin Burke Date: Tue, 6 Apr 2021 14:46:14 -0700 Subject: [PATCH 052/420] chore(ffi): run gen_header.sh in CI environment (#2488) Clean up the script so that any unexpected error terminates the script, and stop suppressing errors that may contain useful information (for example, that you are using the stable version but need to use the nightly). This is useful because if hyper.h is not up to date going forward the CI should flag it. As is, there are a bunch of changes to hyper.h that have not been checked in (or were generated by a newer version of the cbindgen script.) Fixes #2483. 
--- .github/workflows/CI.yml | 40 ++++++- capi/cbindgen.toml | 6 + capi/gen_header.sh | 43 ++++--- capi/include/hyper.h | 250 +++++++++++++++++++++++++++------------ 4 files changed, 241 insertions(+), 98 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index ef8bcefda4..9ffd9f76f8 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -17,6 +17,7 @@ jobs: - test - features - ffi + - ffi-header - doc steps: - run: exit 0 @@ -119,9 +120,7 @@ jobs: ffi: name: Test C API (FFI) needs: [style] - runs-on: ubuntu-latest - steps: - name: Checkout uses: actions/checkout@v1 @@ -147,10 +146,6 @@ jobs: command: build args: --features client,http1,http2,ffi - # TODO: re-enable check once figuring out how to get it working in CI - # - name: Verify cbindgen - # run: ./capi/gen_header.sh --verify - - name: Make Examples run: cd capi/examples && make client @@ -162,6 +157,39 @@ jobs: command: test args: --features full,ffi --lib + ffi-header: + name: Verify hyper.h is up to date + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v1 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + default: true + override: true + components: cargo + + - name: Install cbindgen + uses: actions-rs/cargo@v1 + with: + command: install + args: cbindgen + + - name: Build FFI + uses: actions-rs/cargo@v1 + env: + RUSTFLAGS: --cfg hyper_unstable_ffi + with: + command: build + args: --features client,http1,http2,ffi + + - name: Ensure that hyper.h is up to date + run: ./capi/gen_header.sh --verify + doc: name: Build docs needs: [style, test] diff --git a/capi/cbindgen.toml b/capi/cbindgen.toml index aef9c6237b..d1a58234b5 100644 --- a/capi/cbindgen.toml +++ b/capi/cbindgen.toml @@ -1,4 +1,10 @@ +# See https://github.com/eqrion/cbindgen/blob/master/docs.md#cbindgentoml for +# a list of possible configuration values. language = "C" +header = """/* + * Copyright 2021 Sean McArthur. MIT License. + * Generated by gen_header.sh. Do not edit directly. + */""" include_guard = "_HYPER_H" no_includes = true sys_includes = ["stdint.h", "stddef.h"] diff --git a/capi/gen_header.sh b/capi/gen_header.sh index 4cd1a26c23..a3a02e1c60 100755 --- a/capi/gen_header.sh +++ b/capi/gen_header.sh @@ -1,9 +1,13 @@ #!/usr/bin/env bash -CAPI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +# This script regenerates hyper.h. As of April 2021, it only works with the +# nightly build of Rust. + +set -e -WORK_DIR=`mktemp -d` +CAPI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +WORK_DIR=$(mktemp -d) # check if tmp dir was created if [[ ! "$WORK_DIR" || ! -d "$WORK_DIR" ]]; then @@ -14,9 +18,8 @@ fi header_file_backup="$CAPI_DIR/include/hyper.h.backup" function cleanup { - #echo "$WORK_DIR" rm -rf "$WORK_DIR" - rm "$header_file_backup" + rm "$header_file_backup" || true } trap cleanup EXIT @@ -44,10 +47,14 @@ cp "$CAPI_DIR/include/hyper.h" "$header_file_backup" #cargo metadata --no-default-features --features ffi --format-version 1 > "$WORK_DIR/metadata.json" -cd $WORK_DIR +cd "${WORK_DIR}" || exit 2 # Expand just the ffi module -cargo rustc -- -Z unstable-options --pretty=expanded > expanded.rs 2>/dev/null +if ! output=$(cargo rustc -- -Z unstable-options --pretty=expanded 2>&1 > expanded.rs); then + # As of April 2021 the script above prints a lot of warnings/errors, and + # exits with a nonzero return code, but hyper.h still gets generated. 
+ echo "$output" +fi # Replace the previous copy with the single expanded file rm -rf ./src @@ -56,17 +63,17 @@ mv expanded.rs src/lib.rs # Bindgen! -cbindgen\ - -c "$CAPI_DIR/cbindgen.toml"\ - --lockfile "$CAPI_DIR/../Cargo.lock"\ - -o "$CAPI_DIR/include/hyper.h"\ - $1 - -bindgen_exit_code=$? - -if [[ "--verify" == "$1" && "$bindgen_exit_code" != 0 ]]; then - echo "diff generated (<) vs backup (>)" - diff "$CAPI_DIR/include/hyper.h" "$header_file_backup" +if ! cbindgen \ + --config "$CAPI_DIR/cbindgen.toml" \ + --lockfile "$CAPI_DIR/../Cargo.lock" \ + --output "$CAPI_DIR/include/hyper.h" \ + "${@}"; then + bindgen_exit_code=$? + if [[ "--verify" == "$1" ]]; then + echo "diff generated (<) vs backup (>)" + diff "$CAPI_DIR/include/hyper.h" "$header_file_backup" + fi + exit $bindgen_exit_code fi -exit $bindgen_exit_code +exit 0 diff --git a/capi/include/hyper.h b/capi/include/hyper.h index cfc14a25fa..a305dc4a09 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -1,32 +1,78 @@ +/* + * Copyright 2021 Sean McArthur. MIT License. + * Generated by gen_header.sh. Do not edit directly. + */ + #ifndef _HYPER_H #define _HYPER_H #include #include +/* + Return in iter functions to continue iterating. + */ #define HYPER_ITER_CONTINUE 0 +/* + Return in iter functions to stop iterating. + */ #define HYPER_ITER_BREAK 1 +/* + An HTTP Version that is unspecified. + */ #define HYPER_HTTP_VERSION_NONE 0 +/* + The HTTP/1.0 version. + */ #define HYPER_HTTP_VERSION_1_0 10 +/* + The HTTP/1.1 version. + */ #define HYPER_HTTP_VERSION_1_1 11 +/* + The HTTP/2 version. + */ #define HYPER_HTTP_VERSION_2 20 +/* + Sentinel value to return from a read or write callback that the operation + is pending. + */ #define HYPER_IO_PENDING 4294967295 +/* + Sentinel value to return from a read or write callback that the operation + has errored. + */ #define HYPER_IO_ERROR 4294967294 +/* + Return in a poll function to indicate it was ready. + */ #define HYPER_POLL_READY 0 +/* + Return in a poll function to indicate it is still pending. + + The passed in `hyper_waker` should be registered to wake up the task at + some later point. + */ #define HYPER_POLL_PENDING 1 +/* + Return in a poll function indicate an error. + */ #define HYPER_POLL_ERROR 3 -typedef enum { +/* + A return code for many of hyper's methods. + */ +typedef enum hyper_code { /* All is well. */ @@ -60,7 +106,10 @@ typedef enum { HYPERE_INVALID_PEER_MESSAGE, } hyper_code; -typedef enum { +/* + A descriptor for what type a `hyper_task` value is. + */ +typedef enum hyper_task_return_type { /* The value of this task is null (does not imply an error). */ @@ -83,41 +132,86 @@ typedef enum { HYPER_TASK_BUF, } hyper_task_return_type; -typedef struct hyper_executor hyper_executor; - -typedef struct hyper_io hyper_io; - -typedef struct hyper_task hyper_task; - +/* + A streaming HTTP body. + */ typedef struct hyper_body hyper_body; +/* + A buffer of bytes that is sent or received on a `hyper_body`. + */ typedef struct hyper_buf hyper_buf; +/* + An HTTP client connection handle. + + These are used to send a request on a single connection. It's possible to + send multiple requests on a single connection, such as when HTTP/1 + keep-alive or HTTP/2 is used. + */ typedef struct hyper_clientconn hyper_clientconn; +/* + An options builder to configure an HTTP client connection. + */ typedef struct hyper_clientconn_options hyper_clientconn_options; +/* + An async context for a task that contains the related waker. 
+ */ typedef struct hyper_context hyper_context; +/* + A more detailed error object returned by some hyper functions. + */ typedef struct hyper_error hyper_error; +/* + A task executor for `hyper_task`s. + */ +typedef struct hyper_executor hyper_executor; + +/* + An HTTP header map. + + These can be part of a request or response. + */ typedef struct hyper_headers hyper_headers; +/* + An IO object used to represent a socket or similar concept. + */ +typedef struct hyper_io hyper_io; + +/* + An HTTP request. + */ typedef struct hyper_request hyper_request; +/* + An HTTP response. + */ typedef struct hyper_response hyper_response; +/* + An async task. + */ +typedef struct hyper_task hyper_task; + +/* + A waker that is saved and used to waken a pending task. + */ typedef struct hyper_waker hyper_waker; -typedef int (*hyper_body_foreach_callback)(void*, const hyper_buf*); +typedef int (*hyper_body_foreach_callback)(void*, const struct hyper_buf*); -typedef int (*hyper_body_data_callback)(void*, hyper_context*, hyper_buf**); +typedef int (*hyper_body_data_callback)(void*, struct hyper_context*, struct hyper_buf**); typedef int (*hyper_headers_foreach_callback)(void*, const uint8_t*, size_t, const uint8_t*, size_t); -typedef size_t (*hyper_io_read_callback)(void*, hyper_context*, uint8_t*, size_t); +typedef size_t (*hyper_io_read_callback)(void*, struct hyper_context*, uint8_t*, size_t); -typedef size_t (*hyper_io_write_callback)(void*, hyper_context*, const uint8_t*, size_t); +typedef size_t (*hyper_io_write_callback)(void*, struct hyper_context*, const uint8_t*, size_t); #ifdef __cplusplus extern "C" { @@ -133,12 +227,12 @@ const char *hyper_version(void); If not configured, this body acts as an empty payload. */ -hyper_body *hyper_body_new(void); +struct hyper_body *hyper_body_new(void); /* Free a `hyper_body *`. */ -void hyper_body_free(hyper_body *body); +void hyper_body_free(struct hyper_body *body); /* Return a task that will poll the body for the next buffer of data. @@ -152,7 +246,7 @@ void hyper_body_free(hyper_body *body); This does not consume the `hyper_body *`, so it may be used to again. However, it MUST NOT be used or freed until the related task completes. */ -hyper_task *hyper_body_data(hyper_body *body); +struct hyper_task *hyper_body_data(struct hyper_body *body); /* Return a task that will poll the body and execute the callback with each @@ -166,12 +260,14 @@ hyper_task *hyper_body_data(hyper_body *body); This will consume the `hyper_body *`, you shouldn't use it anymore or free it. */ -hyper_task *hyper_body_foreach(hyper_body *body, hyper_body_foreach_callback func, void *userdata); +struct hyper_task *hyper_body_foreach(struct hyper_body *body, + hyper_body_foreach_callback func, + void *userdata); /* Set userdata on this body, which will be passed to callback functions. */ -void hyper_body_set_userdata(hyper_body *body, void *userdata); +void hyper_body_set_userdata(struct hyper_body *body, void *userdata); /* Set the data callback for this body. @@ -194,7 +290,7 @@ void hyper_body_set_userdata(hyper_body *body, void *userdata); If some error has occurred, you can return `HYPER_POLL_ERROR` to abort the body. */ -void hyper_body_set_data_func(hyper_body *body, hyper_body_data_callback func); +void hyper_body_set_data_func(struct hyper_body *body, hyper_body_data_callback func); /* Create a new `hyper_buf *` by copying the provided bytes. 
@@ -202,7 +298,7 @@ void hyper_body_set_data_func(hyper_body *body, hyper_body_data_callback func); This makes an owned copy of the bytes, so the `buf` argument can be freed or changed afterwards. */ -hyper_buf *hyper_buf_copy(const uint8_t *buf, size_t len); +struct hyper_buf *hyper_buf_copy(const uint8_t *buf, size_t len); /* Get a pointer to the bytes in this buffer. @@ -213,17 +309,17 @@ hyper_buf *hyper_buf_copy(const uint8_t *buf, size_t len); This pointer is borrowed data, and not valid once the `hyper_buf` is consumed/freed. */ -const uint8_t *hyper_buf_bytes(const hyper_buf *buf); +const uint8_t *hyper_buf_bytes(const struct hyper_buf *buf); /* Get the length of the bytes this buffer contains. */ -size_t hyper_buf_len(const hyper_buf *buf); +size_t hyper_buf_len(const struct hyper_buf *buf); /* Free this buffer. */ -void hyper_buf_free(hyper_buf *buf); +void hyper_buf_free(struct hyper_buf *buf); /* Starts an HTTP client connection handshake using the provided IO transport @@ -234,7 +330,8 @@ void hyper_buf_free(hyper_buf *buf); The returned `hyper_task *` must be polled with an executor until the handshake completes, at which point the value can be taken. */ -hyper_task *hyper_clientconn_handshake(hyper_io *io, hyper_clientconn_options *options); +struct hyper_task *hyper_clientconn_handshake(struct hyper_io *io, + struct hyper_clientconn_options *options); /* Send a request on the client connection. @@ -242,46 +339,47 @@ hyper_task *hyper_clientconn_handshake(hyper_io *io, hyper_clientconn_options *o Returns a task that needs to be polled until it is ready. When ready, the task yields a `hyper_response *`. */ -hyper_task *hyper_clientconn_send(hyper_clientconn *conn, hyper_request *req); +struct hyper_task *hyper_clientconn_send(struct hyper_clientconn *conn, struct hyper_request *req); /* Free a `hyper_clientconn *`. */ -void hyper_clientconn_free(hyper_clientconn *conn); +void hyper_clientconn_free(struct hyper_clientconn *conn); /* Creates a new set of HTTP clientconn options to be used in a handshake. */ -hyper_clientconn_options *hyper_clientconn_options_new(void); +struct hyper_clientconn_options *hyper_clientconn_options_new(void); /* Free a `hyper_clientconn_options *`. */ -void hyper_clientconn_options_free(hyper_clientconn_options *opts); +void hyper_clientconn_options_free(struct hyper_clientconn_options *opts); /* Set the client background task executor. This does not consume the `options` or the `exec`. */ -void hyper_clientconn_options_exec(hyper_clientconn_options *opts, const hyper_executor *exec); +void hyper_clientconn_options_exec(struct hyper_clientconn_options *opts, + const struct hyper_executor *exec); /* Set the whether to use HTTP2. Pass `0` to disable, `1` to enable. */ -hyper_code hyper_clientconn_options_http2(hyper_clientconn_options *opts, int enabled); +enum hyper_code hyper_clientconn_options_http2(struct hyper_clientconn_options *opts, int enabled); /* Frees a `hyper_error`. */ -void hyper_error_free(hyper_error *err); +void hyper_error_free(struct hyper_error *err); /* Get an equivalent `hyper_code` from this error. */ -hyper_code hyper_error_code(const hyper_error *err); +enum hyper_code hyper_error_code(const struct hyper_error *err); /* Print the details of this error to a buffer. @@ -291,27 +389,31 @@ hyper_code hyper_error_code(const hyper_error *err); The return value is number of bytes that were written to `dst`. 
*/ -size_t hyper_error_print(const hyper_error *err, uint8_t *dst, size_t dst_len); +size_t hyper_error_print(const struct hyper_error *err, uint8_t *dst, size_t dst_len); /* Construct a new HTTP request. */ -hyper_request *hyper_request_new(void); +struct hyper_request *hyper_request_new(void); /* Free an HTTP request if not going to send it on a client. */ -void hyper_request_free(hyper_request *req); +void hyper_request_free(struct hyper_request *req); /* Set the HTTP Method of the request. */ -hyper_code hyper_request_set_method(hyper_request *req, const uint8_t *method, size_t method_len); +enum hyper_code hyper_request_set_method(struct hyper_request *req, + const uint8_t *method, + size_t method_len); /* Set the URI of the request. */ -hyper_code hyper_request_set_uri(hyper_request *req, const uint8_t *uri, size_t uri_len); +enum hyper_code hyper_request_set_uri(struct hyper_request *req, + const uint8_t *uri, + size_t uri_len); /* Set the preferred HTTP version of the request. @@ -321,7 +423,7 @@ hyper_code hyper_request_set_uri(hyper_request *req, const uint8_t *uri, size_t Note that this won't change the major HTTP version of the connection, since that is determined at the handshake step. */ -hyper_code hyper_request_set_version(hyper_request *req, int version); +enum hyper_code hyper_request_set_version(struct hyper_request *req, int version); /* Gets a reference to the HTTP headers of this request @@ -329,7 +431,7 @@ hyper_code hyper_request_set_version(hyper_request *req, int version); This is not an owned reference, so it should not be accessed after the `hyper_request` has been consumed. */ -hyper_headers *hyper_request_headers(hyper_request *req); +struct hyper_headers *hyper_request_headers(struct hyper_request *req); /* Set the body of the request. @@ -339,19 +441,19 @@ hyper_headers *hyper_request_headers(hyper_request *req); This takes ownership of the `hyper_body *`, you must not use it or free it after setting it on the request. */ -hyper_code hyper_request_set_body(hyper_request *req, hyper_body *body); +enum hyper_code hyper_request_set_body(struct hyper_request *req, struct hyper_body *body); /* Free an HTTP response after using it. */ -void hyper_response_free(hyper_response *resp); +void hyper_response_free(struct hyper_response *resp); /* Get the HTTP-Status code of this response. It will always be within the range of 100-599. */ -uint16_t hyper_response_status(const hyper_response *resp); +uint16_t hyper_response_status(const struct hyper_response *resp); /* Get a pointer to the reason-phrase of this response. @@ -364,14 +466,14 @@ uint16_t hyper_response_status(const hyper_response *resp); Use `hyper_response_reason_phrase_len()` to get the length of this buffer. */ -const uint8_t *hyper_response_reason_phrase(const hyper_response *resp); +const uint8_t *hyper_response_reason_phrase(const struct hyper_response *resp); /* Get the length of the reason-phrase of this response. Use `hyper_response_reason_phrase()` to get the buffer pointer. */ -size_t hyper_response_reason_phrase_len(const hyper_response *resp); +size_t hyper_response_reason_phrase_len(const struct hyper_response *resp); /* Get the HTTP version used by this response. @@ -383,7 +485,7 @@ size_t hyper_response_reason_phrase_len(const hyper_response *resp); - `HYPER_HTTP_VERSION_2` - `HYPER_HTTP_VERSION_NONE` if newer (or older). 
*/ -int hyper_response_version(const hyper_response *resp); +int hyper_response_version(const struct hyper_response *resp); /* Gets a reference to the HTTP headers of this response. @@ -391,14 +493,14 @@ int hyper_response_version(const hyper_response *resp); This is not an owned reference, so it should not be accessed after the `hyper_response` has been freed. */ -hyper_headers *hyper_response_headers(hyper_response *resp); +struct hyper_headers *hyper_response_headers(struct hyper_response *resp); /* Take ownership of the body of this response. It is safe to free the response even after taking ownership of its body. */ -hyper_body *hyper_response_body(hyper_response *resp); +struct hyper_body *hyper_response_body(struct hyper_response *resp); /* Iterates the headers passing each name and value pair to the callback. @@ -408,7 +510,7 @@ hyper_body *hyper_response_body(hyper_response *resp); The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or `HYPER_ITER_BREAK` to stop. */ -void hyper_headers_foreach(const hyper_headers *headers, +void hyper_headers_foreach(const struct hyper_headers *headers, hyper_headers_foreach_callback func, void *userdata); @@ -417,11 +519,11 @@ void hyper_headers_foreach(const hyper_headers *headers, This overwrites any previous value set for the header. */ -hyper_code hyper_headers_set(hyper_headers *headers, - const uint8_t *name, - size_t name_len, - const uint8_t *value, - size_t value_len); +enum hyper_code hyper_headers_set(struct hyper_headers *headers, + const uint8_t *name, + size_t name_len, + const uint8_t *value, + size_t value_len); /* Adds the provided value to the list of the provided name. @@ -429,11 +531,11 @@ hyper_code hyper_headers_set(hyper_headers *headers, If there were already existing values for the name, this will append the new value to the internal list. */ -hyper_code hyper_headers_add(hyper_headers *headers, - const uint8_t *name, - size_t name_len, - const uint8_t *value, - size_t value_len); +enum hyper_code hyper_headers_add(struct hyper_headers *headers, + const uint8_t *name, + size_t name_len, + const uint8_t *value, + size_t value_len); /* Create a new IO type used to represent a transport. @@ -441,7 +543,7 @@ hyper_code hyper_headers_add(hyper_headers *headers, The read and write functions of this transport should be set with `hyper_io_set_read` and `hyper_io_set_write`. */ -hyper_io *hyper_io_new(void); +struct hyper_io *hyper_io_new(void); /* Free an unused `hyper_io *`. @@ -449,14 +551,14 @@ hyper_io *hyper_io_new(void); This is typically only useful if you aren't going to pass ownership of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. */ -void hyper_io_free(hyper_io *io); +void hyper_io_free(struct hyper_io *io); /* Set the user data pointer for this IO to some value. This value is passed as an argument to the read and write callbacks. */ -void hyper_io_set_userdata(hyper_io *io, void *data); +void hyper_io_set_userdata(struct hyper_io *io, void *data); /* Set the read function for this IO transport. @@ -476,7 +578,7 @@ void hyper_io_set_userdata(hyper_io *io, void *data); If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` should be the return value. */ -void hyper_io_set_read(hyper_io *io, hyper_io_read_callback func); +void hyper_io_set_read(struct hyper_io *io, hyper_io_read_callback func); /* Set the write function for this IO transport. 
@@ -493,17 +595,17 @@ void hyper_io_set_read(hyper_io *io, hyper_io_read_callback func); If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` should be the return value. */ -void hyper_io_set_write(hyper_io *io, hyper_io_write_callback func); +void hyper_io_set_write(struct hyper_io *io, hyper_io_write_callback func); /* Creates a new task executor. */ -const hyper_executor *hyper_executor_new(void); +const struct hyper_executor *hyper_executor_new(void); /* Frees an executor and any incomplete tasks still part of it. */ -void hyper_executor_free(const hyper_executor *exec); +void hyper_executor_free(const struct hyper_executor *exec); /* Push a task onto the executor. @@ -511,7 +613,7 @@ void hyper_executor_free(const hyper_executor *exec); The executor takes ownership of the task, it should not be accessed again unless returned back to the user with `hyper_executor_poll`. */ -hyper_code hyper_executor_push(const hyper_executor *exec, hyper_task *task); +enum hyper_code hyper_executor_push(const struct hyper_executor *exec, struct hyper_task *task); /* Polls the executor, trying to make progress on any tasks that have notified @@ -521,12 +623,12 @@ hyper_code hyper_executor_push(const hyper_executor *exec, hyper_task *task); If there are no ready tasks, this returns `NULL`. */ -hyper_task *hyper_executor_poll(const hyper_executor *exec); +struct hyper_task *hyper_executor_poll(const struct hyper_executor *exec); /* Free a task. */ -void hyper_task_free(hyper_task *task); +void hyper_task_free(struct hyper_task *task); /* Takes the output value of this task. @@ -536,12 +638,12 @@ void hyper_task_free(hyper_task *task); Use `hyper_task_type` to determine the type of the `void *` return value. */ -void *hyper_task_value(hyper_task *task); +void *hyper_task_value(struct hyper_task *task); /* Query the return type of this task. */ -hyper_task_return_type hyper_task_type(hyper_task *task); +enum hyper_task_return_type hyper_task_type(struct hyper_task *task); /* Set a user data pointer to be associated with this task. @@ -549,27 +651,27 @@ hyper_task_return_type hyper_task_type(hyper_task *task); This value will be passed to task callbacks, and can be checked later with `hyper_task_userdata`. */ -void hyper_task_set_userdata(hyper_task *task, void *userdata); +void hyper_task_set_userdata(struct hyper_task *task, void *userdata); /* Retrieve the userdata that has been set via `hyper_task_set_userdata`. */ -void *hyper_task_userdata(hyper_task *task); +void *hyper_task_userdata(struct hyper_task *task); /* Copies a waker out of the task context. */ -hyper_waker *hyper_context_waker(hyper_context *cx); +struct hyper_waker *hyper_context_waker(struct hyper_context *cx); /* Free a waker that hasn't been woken. */ -void hyper_waker_free(hyper_waker *waker); +void hyper_waker_free(struct hyper_waker *waker); /* Free a waker that hasn't been woken. 
*/ -void hyper_waker_wake(hyper_waker *waker); +void hyper_waker_wake(struct hyper_waker *waker); #ifdef __cplusplus } // extern "C" From aa4a2eaa44ce4eb8a0049e4363f59f323ed56854 Mon Sep 17 00:00:00 2001 From: Kevin Burke Date: Tue, 6 Apr 2021 14:49:37 -0700 Subject: [PATCH 053/420] chore(LICENSE): update license year (#2491) --- LICENSE | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/LICENSE b/LICENSE index ac660a41c9..bc1e966ed9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2018 Sean McArthur +Copyright (c) 2014-2021 Sean McArthur Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -17,4 +17,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - From ed2fdb7b6a2963cea7577df05ddc41c56fee7246 Mon Sep 17 00:00:00 2001 From: Kevin Burke Date: Wed, 7 Apr 2021 16:12:02 -0700 Subject: [PATCH 054/420] chore(ffi): fix compile errors and warnings (#2492) As I understand it, "cargo rustc" in gen_header.sh generates a ton of errors, but still manages to generate an object that can be used by cbindgen to generate hyper.h. However, I tried to make a separate change to add more fields to hyper.h, and learned that "cargo rustc" stops if it reaches 50 errors, which I reached. I was able to buy some headroom and fix a number of the compilation errors by adding imports to the fake Cargo.toml we generate in gen_header.sh. I wasn't sure how to resolve imports like "crate::Result" which appear to reference the top-level src/error.rs, and print an error when they are compiled in gen_header.sh. But I only need to buy headroom under the 50 error count for now, which I was able to do by adding the imports. It is possible that someone more familiar with Rust than me could look at this and know what to change to get the total number of errors to zero. --- capi/gen_header.sh | 23 ++++++++++++++++++++++- src/ffi/mod.rs | 3 --- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/capi/gen_header.sh b/capi/gen_header.sh index a3a02e1c60..9319857d97 100755 --- a/capi/gen_header.sh +++ b/capi/gen_header.sh @@ -41,6 +41,27 @@ edition = "2018" publish = false [dependencies] +# Determined which dependencies we need by running the "cargo rustc" command +# below and watching the compile error output for references to unknown imports, +# until we didn't get any errors. +bytes = "1" +futures-channel = "0.3" +futures-util = { version = "0.3", default-features = false, features = ["alloc"] } +libc = { version = "0.2", optional = true } +http = "0.2" +http-body = "0.4" +tokio = { version = "1", features = ["rt"] } + +[features] +default = [ + "client", + "ffi", + "http1", +] + +http1 = [] +client = [] +ffi = ["libc", "tokio/rt"] EOF cp "$CAPI_DIR/include/hyper.h" "$header_file_backup" @@ -50,7 +71,7 @@ cp "$CAPI_DIR/include/hyper.h" "$header_file_backup" cd "${WORK_DIR}" || exit 2 # Expand just the ffi module -if ! output=$(cargo rustc -- -Z unstable-options --pretty=expanded 2>&1 > expanded.rs); then +if ! output=$(RUSTFLAGS='--cfg hyper_unstable_ffi' cargo rustc -- -Z unstable-options --pretty=expanded 2>&1 > expanded.rs); then # As of April 2021 the script above prints a lot of warnings/errors, and # exits with a nonzero return code, but hyper.h still gets generated. 
echo "$output" diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index 3d2c7a8df4..83011ff0fc 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -61,9 +61,6 @@ pub use self::http_types::*; pub use self::io::*; pub use self::task::*; -pub(crate) use self::body::UserBody; -pub(crate) use self::http_types::{HeaderCaseMap, ReasonPhrase}; - /// Return in iter functions to continue iterating. pub const HYPER_ITER_CONTINUE: libc::c_int = 0; /// Return in iter functions to stop iterating. From 11345394d968d4817e1a0ee2550228ac0ae7ce74 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Tue, 20 Apr 2021 23:17:48 +0200 Subject: [PATCH 055/420] feat(client): add option to allow misplaced spaces in HTTP/1 responses (#2506) --- Cargo.toml | 2 +- src/client/client.rs | 25 +++++++++++++++++++ src/client/conn.rs | 12 +++++++++ src/proto/h1/conn.rs | 14 ++++++++++- src/proto/h1/io.rs | 9 +++++-- src/proto/h1/mod.rs | 2 ++ src/proto/h1/role.rs | 58 +++++++++++++++++++++++++++++++++++++++++++- 7 files changed, 117 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 02d6d333c4..f80170c6cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ futures-util = { version = "0.3", default-features = false } http = "0.2" http-body = "0.4" httpdate = "1.0" -httparse = "1.0" +httparse = "1.4" h2 = { version = "0.3", optional = true } itoa = "0.4.1" tracing = { version = "0.1", default-features = false, features = ["std"] } diff --git a/src/client/client.rs b/src/client/client.rs index 418f3fb4e9..d054d68076 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -961,6 +961,31 @@ impl Builder { self } + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self { + self.conn_builder + .h1_allow_spaces_after_header_name_in_responses(val); + self + } + /// Set whether HTTP/1 connections will write header names as title case at /// the socket level. 
/// diff --git a/src/client/conn.rs b/src/client/conn.rs index b87600d85a..e65986bbf0 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -56,6 +56,7 @@ use std::time::Duration; use bytes::Bytes; use futures_util::future::{self, Either, FutureExt as _}; +use httparse::ParserConfig; use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use tower_service::Service; @@ -123,6 +124,7 @@ where pub struct Builder { pub(super) exec: Exec, h09_responses: bool, + h1_parser_config: ParserConfig, h1_title_case_headers: bool, h1_read_buf_exact_size: Option, h1_max_buf_size: Option, @@ -496,6 +498,7 @@ impl Builder { exec: Exec::Default, h09_responses: false, h1_read_buf_exact_size: None, + h1_parser_config: Default::default(), h1_title_case_headers: false, h1_max_buf_size: None, #[cfg(feature = "http2")] @@ -521,6 +524,14 @@ impl Builder { self } + pub(crate) fn h1_allow_spaces_after_header_name_in_responses( + &mut self, + enabled: bool, + ) -> &mut Builder { + self.h1_parser_config.allow_spaces_after_header_name_in_responses(enabled); + self + } + pub(super) fn h1_title_case_headers(&mut self, enabled: bool) -> &mut Builder { self.h1_title_case_headers = enabled; self @@ -704,6 +715,7 @@ impl Builder { #[cfg(feature = "http1")] Proto::Http1 => { let mut conn = proto::Conn::new(io); + conn.set_h1_parser_config(opts.h1_parser_config); if opts.h1_title_case_headers { conn.set_title_case_headers(); } diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index ce0848ddea..01ec59528d 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -5,6 +5,7 @@ use std::marker::PhantomData; use bytes::{Buf, Bytes}; use http::header::{HeaderValue, CONNECTION}; use http::{HeaderMap, Method, Version}; +use httparse::ParserConfig; use tokio::io::{AsyncRead, AsyncWrite}; use super::io::Buffered; @@ -44,6 +45,7 @@ where error: None, keep_alive: KA::Busy, method: None, + h1_parser_config: ParserConfig::default(), #[cfg(feature = "ffi")] preserve_header_case: false, title_case_headers: false, @@ -79,6 +81,11 @@ where self.state.title_case_headers = true; } + #[cfg(feature = "client")] + pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) { + self.state.h1_parser_config = parser_config; + } + #[cfg(feature = "client")] pub(crate) fn set_h09_responses(&mut self) { self.state.h09_responses = true; @@ -150,6 +157,7 @@ where ParseContext { cached_headers: &mut self.state.cached_headers, req_method: &mut self.state.method, + h1_parser_config: self.state.h1_parser_config.clone(), #[cfg(feature = "ffi")] preserve_header_case: self.state.preserve_header_case, h09_responses: self.state.h09_responses, @@ -284,7 +292,10 @@ where ret } - pub(crate) fn poll_read_keep_alive(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_read_keep_alive( + &mut self, + cx: &mut task::Context<'_>, + ) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body()); if self.is_read_closed() { @@ -760,6 +771,7 @@ struct State { /// This is used to know things such as if the message can include /// a body or not. 
method: Option, + h1_parser_config: ParserConfig, #[cfg(feature = "ffi")] preserve_header_case: bool, title_case_headers: bool, diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index c7ce48664b..dc6e72c146 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -159,6 +159,7 @@ where ParseContext { cached_headers: parse_ctx.cached_headers, req_method: parse_ctx.req_method, + h1_parser_config: parse_ctx.h1_parser_config.clone(), #[cfg(feature = "ffi")] preserve_header_case: parse_ctx.preserve_header_case, h09_responses: parse_ctx.h09_responses, @@ -183,7 +184,10 @@ where } } - pub(crate) fn poll_read_from_io(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_read_from_io( + &mut self, + cx: &mut task::Context<'_>, + ) -> Poll> { self.read_blocked = false; let next = self.read_buf_strategy.next(); if self.read_buf_remaining_mut() < next { @@ -378,7 +382,7 @@ impl ReadStrategy { *decrease_now = false; } } - }, + } #[cfg(feature = "client")] ReadStrategy::Exact(_) => (), } @@ -639,6 +643,7 @@ mod tests { let parse_ctx = ParseContext { cached_headers: &mut None, req_method: &mut None, + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs index 01a9253fa3..3934502e27 100644 --- a/src/proto/h1/mod.rs +++ b/src/proto/h1/mod.rs @@ -1,5 +1,6 @@ use bytes::BytesMut; use http::{HeaderMap, Method}; +use httparse::ParserConfig; use crate::body::DecodedLength; use crate::proto::{BodyLength, MessageHead}; @@ -70,6 +71,7 @@ pub(crate) struct ParsedMessage { pub(crate) struct ParseContext<'a> { cached_headers: &'a mut Option, req_method: &'a mut Option, + h1_parser_config: ParserConfig, #[cfg(feature = "ffi")] preserve_header_case: bool, h09_responses: bool, diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index ea9dc96be1..1493943175 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -683,7 +683,8 @@ impl Http1Transaction for Client { ); let mut res = httparse::Response::new(&mut headers); let bytes = buf.as_ref(); - match res.parse(bytes) { + match ctx.h1_parser_config.parse_response(&mut res, bytes) + { Ok(httparse::Status::Complete(len)) => { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; @@ -1231,6 +1232,7 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut method, + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1254,6 +1256,7 @@ mod tests { let ctx = ParseContext { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1272,6 +1275,7 @@ mod tests { let ctx = ParseContext { cached_headers: &mut None, req_method: &mut None, + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1288,6 +1292,7 @@ mod tests { let ctx = ParseContext { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: true, @@ -1306,6 +1311,7 @@ mod tests { let ctx = ParseContext { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1314,6 +1320,48 @@ mod tests { assert_eq!(raw, 
H09_RESPONSE); } + const RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON: &'static str = + "HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials : true\r\n\r\n"; + + #[test] + fn test_parse_allow_response_with_spaces_before_colons() { + use httparse::ParserConfig; + + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); + let mut h1_parser_config = ParserConfig::default(); + h1_parser_config.allow_spaces_after_header_name_in_responses(true); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config, + #[cfg(feature = "ffi")] + preserve_header_case: false, + h09_responses: false, + }; + let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); + assert_eq!(raw.len(), 0); + assert_eq!(msg.head.subject, crate::StatusCode::OK); + assert_eq!(msg.head.version, crate::Version::HTTP_11); + assert_eq!(msg.head.headers.len(), 1); + assert_eq!(msg.head.headers["Access-Control-Allow-Credentials"], "true"); + } + + #[test] + fn test_parse_reject_response_with_spaces_before_colons() { + let _ = pretty_env_logger::try_init(); + let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut Some(crate::Method::GET), + h1_parser_config: Default::default(), + #[cfg(feature = "ffi")] + preserve_header_case: false, + h09_responses: false, + }; + Client::parse(&mut raw, ctx).unwrap_err(); + } + #[test] fn test_decoder_request() { fn parse(s: &str) -> ParsedMessage { @@ -1323,6 +1371,7 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut None, + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1339,6 +1388,7 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut None, + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1554,6 +1604,7 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1570,6 +1621,7 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(m), + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1586,6 +1638,7 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1902,6 +1955,7 @@ mod tests { ParseContext { cached_headers: &mut None, req_method: &mut Some(Method::GET), + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -1984,6 +2038,7 @@ mod tests { ParseContext { cached_headers: &mut headers, req_method: &mut None, + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, @@ -2020,6 +2075,7 @@ mod tests { ParseContext { cached_headers: &mut headers, req_method: &mut None, + h1_parser_config: Default::default(), #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, From 117cc492a62c4051c75e7eec0f624b30db8a20e5 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 20 Apr 2021 17:23:10 -0700 Subject: [PATCH 056/420] chore(ci): quiet the warnings 
when verifying the generated header file (#2507) --- capi/gen_header.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/capi/gen_header.sh b/capi/gen_header.sh index 9319857d97..a4460c6295 100755 --- a/capi/gen_header.sh +++ b/capi/gen_header.sh @@ -74,7 +74,15 @@ cd "${WORK_DIR}" || exit 2 if ! output=$(RUSTFLAGS='--cfg hyper_unstable_ffi' cargo rustc -- -Z unstable-options --pretty=expanded 2>&1 > expanded.rs); then # As of April 2021 the script above prints a lot of warnings/errors, and # exits with a nonzero return code, but hyper.h still gets generated. - echo "$output" + # + # However, on Github Actions, this will result in automatic "annotations" + # being added to files not related to a PR, so if this is `--verify` mode, + # then don't show it. + # + # But yes show it when using it locally. + if [[ "--verify" != "$1" ]]; then + echo "$output" + fi fi # Replace the previous copy with the single expanded file From dbea7716f157896bf7d2d417be7b4e382e7dc34f Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 21 Apr 2021 18:50:35 +0200 Subject: [PATCH 057/420] feat(http1): add options to preserve header casing (#2480) Decouple preserving header case from FFI: The feature is now supported in both the server and the client and can be combined with the title case feature, for headers which don't have entries in the header case map. Closes #2313 --- src/client/client.rs | 11 + src/client/conn.rs | 10 + src/ext.rs | 64 +++++ src/ffi/client.rs | 5 +- src/ffi/http_types.rs | 45 +-- src/lib.rs | 1 + src/proto/h1/conn.rs | 22 +- src/proto/h1/io.rs | 2 - src/proto/h1/mod.rs | 1 - src/proto/h1/role.rs | 644 +++++++++++++++++++++++++++++++++--------- src/server/conn.rs | 19 ++ src/server/server.rs | 13 + 12 files changed, 656 insertions(+), 181 deletions(-) create mode 100644 src/ext.rs diff --git a/src/client/client.rs b/src/client/client.rs index d054d68076..a219eb14ad 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -997,6 +997,17 @@ impl Builder { self } + /// Set whether HTTP/1 connections will write header names as provided + /// at the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { + self.conn_builder.h1_preserve_header_case(val); + self + } + /// Set whether HTTP/0.9 responses should be tolerated. /// /// Default is false. 
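As a usage sketch (not part of the diff above), the two new `Client` setters from PATCH 055 and this patch could be combined as below. Only the two setter names are taken from the hunks; the function name, the `HttpConnector`/`build_http` plumbing, and the assumption of the `client`, `http1`, and `tcp` features are illustrative:

```rust
use hyper::client::{Client, HttpConnector};

// Illustrative sketch; only the two builder setters below come from the patches.
fn lenient_client() -> Client<HttpConnector> {
    Client::builder()
        // PATCH 055: tolerate `Header-Name : value` (space before the colon) in
        // HTTP/1 responses from non-conforming upstreams instead of rejecting them.
        .http1_allow_spaces_after_header_name_in_responses(true)
        // This patch: write request header names as provided and record the
        // casing of received response headers. HTTP/2 is unaffected by both.
        .http1_preserve_header_case(true)
        .build_http()
}
```
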
diff --git a/src/client/conn.rs b/src/client/conn.rs index e65986bbf0..ef98fd30ce 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -126,6 +126,7 @@ pub struct Builder { h09_responses: bool, h1_parser_config: ParserConfig, h1_title_case_headers: bool, + h1_preserve_header_case: bool, h1_read_buf_exact_size: Option, h1_max_buf_size: Option, #[cfg(feature = "http2")] @@ -500,6 +501,7 @@ impl Builder { h1_read_buf_exact_size: None, h1_parser_config: Default::default(), h1_title_case_headers: false, + h1_preserve_header_case: false, h1_max_buf_size: None, #[cfg(feature = "http2")] h2_builder: Default::default(), @@ -537,6 +539,11 @@ impl Builder { self } + pub(crate) fn h1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_case = enabled; + self + } + pub(super) fn h1_read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { self.h1_read_buf_exact_size = sz; self.h1_max_buf_size = None; @@ -719,6 +726,9 @@ impl Builder { if opts.h1_title_case_headers { conn.set_title_case_headers(); } + if opts.h1_preserve_header_case { + conn.set_preserve_header_case(); + } if opts.h09_responses { conn.set_h09_responses(); } diff --git a/src/ext.rs b/src/ext.rs new file mode 100644 index 0000000000..10cde75970 --- /dev/null +++ b/src/ext.rs @@ -0,0 +1,64 @@ +//! HTTP extensions + +use bytes::Bytes; +#[cfg(feature = "http1")] +use http::header::{HeaderName, IntoHeaderName, ValueIter}; +use http::HeaderMap; + +/// A map from header names to their original casing as received in an HTTP message. +/// +/// If an HTTP/1 response `res` is parsed on a connection whose option +/// [`http1_preserve_header_case`] was set to true and the response included +/// the following headers: +/// +/// ```ignore +/// x-Bread: Baguette +/// X-BREAD: Pain +/// x-bread: Ficelle +/// ``` +/// +/// Then `res.extensions().get::()` will return a map with: +/// +/// ```ignore +/// HeaderCaseMap({ +/// "x-bread": ["x-Bread", "X-BREAD", "x-bread"], +/// }) +/// ``` +/// +/// [`http1_preserve_header_case`]: /client/struct.Client.html#method.http1_preserve_header_case +#[derive(Clone, Debug)] +pub(crate) struct HeaderCaseMap(HeaderMap); + +#[cfg(feature = "http1")] +impl HeaderCaseMap { + /// Returns a view of all spellings associated with that header name, + /// in the order they were found. + pub(crate) fn get_all<'a>( + &'a self, + name: &HeaderName, + ) -> impl Iterator + 'a> + 'a { + self.get_all_internal(name).into_iter() + } + + /// Returns a view of all spellings associated with that header name, + /// in the order they were found. + pub(crate) fn get_all_internal<'a>(&'a self, name: &HeaderName) -> ValueIter<'_, Bytes> { + self.0.get_all(name).into_iter() + } + + pub(crate) fn default() -> Self { + Self(Default::default()) + } + + #[cfg(any(test, feature = "ffi"))] + pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) { + self.0.insert(name, orig); + } + + pub(crate) fn append(&mut self, name: N, orig: Bytes) + where + N: IntoHeaderName, + { + self.0.append(name, orig); + } +} diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 0351214e09..9be4f5a04d 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -106,8 +106,11 @@ unsafe impl AsTaskType for hyper_clientconn { ffi_fn! { /// Creates a new set of HTTP clientconn options to be used in a handshake. 
fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { + let mut builder = conn::Builder::new(); + builder.h1_preserve_header_case(true); + Box::into_raw(Box::new(hyper_clientconn_options { - builder: conn::Builder::new(), + builder, exec: WeakExec::new(), })) } ?= std::ptr::null_mut() diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index 1fce28902a..924944835b 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -6,6 +6,7 @@ use super::body::hyper_body; use super::error::hyper_code; use super::task::{hyper_task_return_type, AsTaskType}; use super::HYPER_ITER_CONTINUE; +use crate::ext::HeaderCaseMap; use crate::header::{HeaderName, HeaderValue}; use crate::{Body, HeaderMap, Method, Request, Response, Uri}; @@ -18,16 +19,11 @@ pub struct hyper_response(pub(super) Response); /// An HTTP header map. /// /// These can be part of a request or response. -#[derive(Default)] pub struct hyper_headers { pub(super) headers: HeaderMap, orig_casing: HeaderCaseMap, } -// Will probably be moved to `hyper::ext::http1` -#[derive(Debug, Default)] -pub(crate) struct HeaderCaseMap(HeaderMap); - #[derive(Debug)] pub(crate) struct ReasonPhrase(pub(crate) Bytes); @@ -229,7 +225,7 @@ impl hyper_response { let orig_casing = resp .extensions_mut() .remove::() - .unwrap_or_default(); + .unwrap_or_else(HeaderCaseMap::default); resp.extensions_mut().insert(hyper_headers { headers, orig_casing, @@ -265,10 +261,7 @@ type hyper_headers_foreach_callback = impl hyper_headers { pub(super) fn get_or_default(ext: &mut http::Extensions) -> &mut hyper_headers { if let None = ext.get_mut::() { - ext.insert(hyper_headers { - headers: Default::default(), - orig_casing: Default::default(), - }); + ext.insert(hyper_headers::default()); } ext.get_mut::().unwrap() @@ -290,11 +283,11 @@ ffi_fn! { // // TODO: consider adding http::HeaderMap::entries() iterator for name in headers.headers.keys() { - let mut names = headers.orig_casing.get_all(name).iter(); + let mut names = headers.orig_casing.get_all(name); for value in headers.headers.get_all(name) { let (name_ptr, name_len) = if let Some(orig_name) = names.next() { - (orig_name.as_ptr(), orig_name.len()) + (orig_name.as_ref().as_ptr(), orig_name.as_ref().len()) } else { ( name.as_str().as_bytes().as_ptr(), @@ -349,6 +342,15 @@ ffi_fn! 
{ } } +impl Default for hyper_headers { + fn default() -> Self { + Self { + headers: Default::default(), + orig_casing: HeaderCaseMap::default(), + } + } +} + unsafe fn raw_name_value( name: *const u8, name_len: size_t, @@ -370,25 +372,6 @@ unsafe fn raw_name_value( Ok((name, value, orig_name)) } -// ===== impl HeaderCaseMap ===== - -impl HeaderCaseMap { - pub(crate) fn get_all(&self, name: &HeaderName) -> http::header::GetAll<'_, Bytes> { - self.0.get_all(name) - } - - pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) { - self.0.insert(name, orig); - } - - pub(crate) fn append(&mut self, name: N, orig: Bytes) - where - N: http::header::IntoHeaderName, - { - self.0.append(name, orig); - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/src/lib.rs b/src/lib.rs index 059f8821c6..e520051314 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -80,6 +80,7 @@ mod cfg; mod common; pub mod body; mod error; +mod ext; #[cfg(test)] mod mock; #[cfg(any(feature = "http1", feature = "http2",))] diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index 01ec59528d..dd5397880d 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -46,7 +46,6 @@ where keep_alive: KA::Busy, method: None, h1_parser_config: ParserConfig::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, title_case_headers: false, h09_responses: false, @@ -77,13 +76,16 @@ where } #[cfg(feature = "client")] + pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) { + self.state.h1_parser_config = parser_config; + } + pub(crate) fn set_title_case_headers(&mut self) { self.state.title_case_headers = true; } - #[cfg(feature = "client")] - pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) { - self.state.h1_parser_config = parser_config; + pub(crate) fn set_preserve_header_case(&mut self) { + self.state.preserve_header_case = true; } #[cfg(feature = "client")] @@ -158,7 +160,6 @@ where cached_headers: &mut self.state.cached_headers, req_method: &mut self.state.method, h1_parser_config: self.state.h1_parser_config.clone(), - #[cfg(feature = "ffi")] preserve_header_case: self.state.preserve_header_case, h09_responses: self.state.h09_responses, } @@ -499,16 +500,6 @@ where self.enforce_version(&mut head); - // Maybe check if we should preserve header casing on received - // message headers... - #[cfg(feature = "ffi")] - { - if T::is_client() && !self.state.preserve_header_case { - self.state.preserve_header_case = - head.extensions.get::().is_some(); - } - } - let buf = self.io.headers_buf(); match super::role::encode_headers::( Encode { @@ -772,7 +763,6 @@ struct State { /// a body or not. 
method: Option, h1_parser_config: ParserConfig, - #[cfg(feature = "ffi")] preserve_header_case: bool, title_case_headers: bool, h09_responses: bool, diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index dc6e72c146..5cf670991b 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -160,7 +160,6 @@ where cached_headers: parse_ctx.cached_headers, req_method: parse_ctx.req_method, h1_parser_config: parse_ctx.h1_parser_config.clone(), - #[cfg(feature = "ffi")] preserve_header_case: parse_ctx.preserve_header_case, h09_responses: parse_ctx.h09_responses, }, @@ -644,7 +643,6 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }; diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs index 3934502e27..3871277c25 100644 --- a/src/proto/h1/mod.rs +++ b/src/proto/h1/mod.rs @@ -72,7 +72,6 @@ pub(crate) struct ParseContext<'a> { cached_headers: &'a mut Option, req_method: &'a mut Option, h1_parser_config: ParserConfig, - #[cfg(feature = "ffi")] preserve_header_case: bool, h09_responses: bool, } diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 1493943175..84dc091147 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -5,16 +5,19 @@ use std::fmt::{self, Write}; use std::mem; -#[cfg(feature = "ffi")] +#[cfg(any(test, feature = "server", feature = "ffi"))] use bytes::Bytes; use bytes::BytesMut; use http::header::{self, Entry, HeaderName, HeaderValue}; +#[cfg(feature = "server")] +use http::header::ValueIter; use http::{HeaderMap, Method, StatusCode, Version}; use crate::body::DecodedLength; #[cfg(feature = "server")] use crate::common::date; use crate::error::Parse; +use crate::ext::HeaderCaseMap; use crate::headers; use crate::proto::h1::{ Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage, @@ -191,6 +194,12 @@ impl Http1Transaction for Server { let mut is_te_chunked = false; let mut wants_upgrade = subject.0 == Method::CONNECT; + let mut header_case_map = if ctx.preserve_header_case { + Some(HeaderCaseMap::default()) + } else { + None + }; + let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); headers.reserve(headers_len); @@ -260,6 +269,10 @@ impl Http1Transaction for Server { _ => (), } + if let Some(ref mut header_case_map) = header_case_map { + header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); + } + headers.append(name, value); } @@ -268,6 +281,12 @@ impl Http1Transaction for Server { return Err(Parse::Header); } + let mut extensions = http::Extensions::default(); + + if let Some(header_case_map) = header_case_map { + extensions.insert(header_case_map); + } + *ctx.req_method = Some(subject.0.clone()); Ok(Some(ParsedMessage { @@ -275,7 +294,7 @@ impl Http1Transaction for Server { version, subject, headers, - extensions: http::Extensions::default(), + extensions, }, decode: decoder, expect_continue, @@ -284,20 +303,13 @@ impl Http1Transaction for Server { })) } - fn encode( - mut msg: Encode<'_, Self::Outgoing>, - mut dst: &mut Vec, - ) -> crate::Result { + fn encode(mut msg: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result { trace!( "Server::encode status={:?}, body={:?}, req_method={:?}", msg.head.subject, msg.body, msg.req_method ); - debug_assert!( - !msg.title_case_headers, - "no server config for title case headers" - ); let mut wrote_len = false; @@ -305,7 +317,7 @@ impl Http1Transaction for Server { // This is because Service only allows returning a 
single Response, and // so if you try to reply with a e.g. 100 Continue, you have no way of // replying with the latter status code response. - let (ret, mut is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS { + let (ret, is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS { (Ok(()), true) } else if msg.req_method == &Some(Method::CONNECT) && msg.head.subject.is_success() { // Sending content-length or transfer-encoding header on 2xx response @@ -326,9 +338,6 @@ impl Http1Transaction for Server { // pushing some bytes onto the `dst`. In those cases, we don't want to send // the half-pushed message, so rewind to before. let orig_len = dst.len(); - let rewind = |dst: &mut Vec| { - dst.truncate(orig_len); - }; let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; dst.reserve(init_cap); @@ -359,6 +368,217 @@ impl Http1Transaction for Server { extend(dst, b"\r\n"); } + let orig_headers; + let extensions = mem::take(&mut msg.head.extensions); + let orig_headers = match extensions.get::() { + None if msg.title_case_headers => { + orig_headers = HeaderCaseMap::default(); + Some(&orig_headers) + } + orig_headers => orig_headers, + }; + let encoder = if let Some(orig_headers) = orig_headers { + Self::encode_headers_with_original_case( + msg, + dst, + is_last, + orig_len, + wrote_len, + orig_headers, + )? + } else { + Self::encode_headers_with_lower_case(msg, dst, is_last, orig_len, wrote_len)? + }; + + ret.map(|()| encoder) + } + + fn on_error(err: &crate::Error) -> Option> { + use crate::error::Kind; + let status = match *err.kind() { + Kind::Parse(Parse::Method) + | Kind::Parse(Parse::Header) + | Kind::Parse(Parse::Uri) + | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST, + Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE, + _ => return None, + }; + + debug!("sending automatic response ({}) for parse error", status); + let mut msg = MessageHead::default(); + msg.subject = status; + Some(msg) + } + + fn is_server() -> bool { + true + } + + fn update_date() { + date::update(); + } +} + +#[cfg(feature = "server")] +impl Server { + fn can_have_body(method: &Option, status: StatusCode) -> bool { + Server::can_chunked(method, status) + } + + fn can_chunked(method: &Option, status: StatusCode) -> bool { + if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success() + { + false + } else if status.is_informational() { + false + } else { + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, + _ => true, + } + } + } + + fn can_have_content_length(method: &Option, status: StatusCode) -> bool { + if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() { + false + } else { + match status { + StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, + _ => true, + } + } + } + + fn encode_headers_with_lower_case( + msg: Encode<'_, StatusCode>, + dst: &mut Vec, + is_last: bool, + orig_len: usize, + wrote_len: bool, + ) -> crate::Result { + struct LowercaseWriter; + + impl HeaderNameWriter for LowercaseWriter { + #[inline] + fn write_full_header_line( + &mut self, + dst: &mut Vec, + line: &str, + _: (HeaderName, &str), + ) { + extend(dst, line.as_bytes()) + } + + #[inline] + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + name_with_colon: &str, + _: HeaderName, + ) { + extend(dst, name_with_colon.as_bytes()) + } + + #[inline] + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName) { + extend(dst, name.as_str().as_bytes()) + 
} + } + + Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, LowercaseWriter) + } + + #[cold] + #[inline(never)] + fn encode_headers_with_original_case( + msg: Encode<'_, StatusCode>, + dst: &mut Vec, + is_last: bool, + orig_len: usize, + wrote_len: bool, + orig_headers: &HeaderCaseMap, + ) -> crate::Result { + struct OrigCaseWriter<'map> { + map: &'map HeaderCaseMap, + current: Option<(HeaderName, ValueIter<'map, Bytes>)>, + title_case_headers: bool, + } + + impl HeaderNameWriter for OrigCaseWriter<'_> { + #[inline] + fn write_full_header_line( + &mut self, + dst: &mut Vec, + _: &str, + (name, rest): (HeaderName, &str), + ) { + self.write_header_name(dst, &name); + extend(dst, rest.as_bytes()); + } + + #[inline] + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + _: &str, + name: HeaderName, + ) { + self.write_header_name(dst, &name); + extend(dst, b": "); + } + + #[inline] + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName) { + let Self { + map, + ref mut current, + title_case_headers, + } = *self; + if current.as_ref().map_or(true, |(last, _)| last != name) { + *current = None; + } + let (_, values) = + current.get_or_insert_with(|| (name.clone(), map.get_all_internal(name))); + + if let Some(orig_name) = values.next() { + extend(dst, orig_name); + } else if title_case_headers { + title_case(dst, name.as_str().as_bytes()); + } else { + extend(dst, name.as_str().as_bytes()); + } + } + } + + let header_name_writer = OrigCaseWriter { + map: orig_headers, + current: None, + title_case_headers: msg.title_case_headers, + }; + + Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, header_name_writer) + } + + #[inline] + fn encode_headers( + msg: Encode<'_, StatusCode>, + mut dst: &mut Vec, + mut is_last: bool, + orig_len: usize, + mut wrote_len: bool, + mut header_name_writer: W, + ) -> crate::Result + where + W: HeaderNameWriter, + { + // In some error cases, we don't know about the invalid message until already + // pushing some bytes onto the `dst`. In those cases, we don't want to send + // the half-pushed message, so rewind to before. + let rewind = |dst: &mut Vec| { + dst.truncate(orig_len); + }; + let mut encoder = Encoder::length(0); let mut wrote_date = false; let mut cur_name = None; @@ -422,7 +642,11 @@ impl Http1Transaction for Server { if !is_name_written { encoder = Encoder::length(known_len); - extend(dst, b"content-length: "); + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); extend(dst, value.as_bytes()); wrote_len = true; is_name_written = true; @@ -450,7 +674,11 @@ impl Http1Transaction for Server { } else { // we haven't written content-length yet! 
encoder = Encoder::length(len); - extend(dst, b"content-length: "); + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); extend(dst, value.as_bytes()); wrote_len = true; is_name_written = true; @@ -505,7 +733,11 @@ impl Http1Transaction for Server { if !is_name_written { encoder = Encoder::chunked(); is_name_written = true; - extend(dst, b"transfer-encoding: "); + header_name_writer.write_header_name_with_colon( + dst, + "transfer-encoding: ", + header::TRANSFER_ENCODING, + ); extend(dst, value.as_bytes()); } else { extend(dst, b", "); @@ -519,7 +751,11 @@ impl Http1Transaction for Server { } if !is_name_written { is_name_written = true; - extend(dst, b"connection: "); + header_name_writer.write_header_name_with_colon( + dst, + "connection: ", + header::CONNECTION, + ); extend(dst, value.as_bytes()); } else { extend(dst, b", "); @@ -541,7 +777,7 @@ impl Http1Transaction for Server { "{:?} set is_name_written and didn't continue loop", name, ); - extend(dst, name.as_str().as_bytes()); + header_name_writer.write_header_name(dst, name); extend(dst, b": "); extend(dst, value.as_bytes()); extend(dst, b"\r\n"); @@ -557,13 +793,21 @@ impl Http1Transaction for Server { { Encoder::close_delimited() } else { - extend(dst, b"transfer-encoding: chunked\r\n"); + header_name_writer.write_full_header_line( + dst, + "transfer-encoding: chunked\r\n", + (header::TRANSFER_ENCODING, ": chunked\r\n"), + ); Encoder::chunked() } } None | Some(BodyLength::Known(0)) => { if Server::can_have_content_length(msg.req_method, msg.head.subject) { - extend(dst, b"content-length: 0\r\n"); + header_name_writer.write_full_header_line( + dst, + "content-length: 0\r\n", + (header::CONTENT_LENGTH, ": 0\r\n"), + ) } Encoder::length(0) } @@ -571,7 +815,11 @@ impl Http1Transaction for Server { if !Server::can_have_content_length(msg.req_method, msg.head.subject) { Encoder::length(0) } else { - extend(dst, b"content-length: "); + header_name_writer.write_header_name_with_colon( + dst, + "content-length: ", + header::CONTENT_LENGTH, + ); let _ = ::itoa::write(&mut dst, len); extend(dst, b"\r\n"); Encoder::length(len) @@ -592,72 +840,32 @@ impl Http1Transaction for Server { // cached date is much faster than formatting every request if !wrote_date { dst.reserve(date::DATE_VALUE_LENGTH + 8); - extend(dst, b"date: "); + header_name_writer.write_header_name_with_colon(dst, "date: ", header::DATE); date::extend(dst); extend(dst, b"\r\n\r\n"); } else { extend(dst, b"\r\n"); } - ret.map(|()| encoder.set_last(is_last)) - } - - fn on_error(err: &crate::Error) -> Option> { - use crate::error::Kind; - let status = match *err.kind() { - Kind::Parse(Parse::Method) - | Kind::Parse(Parse::Header) - | Kind::Parse(Parse::Uri) - | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST, - Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE, - _ => return None, - }; - - debug!("sending automatic response ({}) for parse error", status); - let mut msg = MessageHead::default(); - msg.subject = status; - Some(msg) - } - - fn is_server() -> bool { - true - } - - fn update_date() { - date::update(); + Ok(encoder.set_last(is_last)) } } #[cfg(feature = "server")] -impl Server { - fn can_have_body(method: &Option, status: StatusCode) -> bool { - Server::can_chunked(method, status) - } - - fn can_chunked(method: &Option, status: StatusCode) -> bool { - if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success() - { - false - } else if 
status.is_informational() { - false - } else { - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, - _ => true, - } - } - } - - fn can_have_content_length(method: &Option, status: StatusCode) -> bool { - if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() { - false - } else { - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, - _ => true, - } - } - } +trait HeaderNameWriter { + fn write_full_header_line( + &mut self, + dst: &mut Vec, + line: &str, + name_value_pair: (HeaderName, &str), + ); + fn write_header_name_with_colon( + &mut self, + dst: &mut Vec, + name_with_colon: &str, + name: HeaderName, + ); + fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName); } #[cfg(feature = "client")] @@ -732,8 +940,11 @@ impl Http1Transaction for Client { let mut keep_alive = version == Version::HTTP_11; - #[cfg(feature = "ffi")] - let mut header_case_map = crate::ffi::HeaderCaseMap::default(); + let mut header_case_map = if ctx.preserve_header_case { + Some(HeaderCaseMap::default()) + } else { + None + }; headers.reserve(headers_len); for header in &headers_indices[..headers_len] { @@ -751,19 +962,16 @@ impl Http1Transaction for Client { } } - #[cfg(feature = "ffi")] - if ctx.preserve_header_case { + if let Some(ref mut header_case_map) = header_case_map { header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); } headers.append(name, value); } - #[allow(unused_mut)] let mut extensions = http::Extensions::default(); - #[cfg(feature = "ffi")] - if ctx.preserve_header_case { + if let Some(header_case_map) = header_case_map { extensions.insert(header_case_map); } @@ -830,26 +1038,17 @@ impl Http1Transaction for Client { } extend(dst, b"\r\n"); - #[cfg(feature = "ffi")] - { - if msg.title_case_headers { - write_headers_title_case(&msg.head.headers, dst); - } else if let Some(orig_headers) = - msg.head.extensions.get::() - { - write_headers_original_case(&msg.head.headers, orig_headers, dst); - } else { - write_headers(&msg.head.headers, dst); - } - } - - #[cfg(not(feature = "ffi"))] - { - if msg.title_case_headers { - write_headers_title_case(&msg.head.headers, dst); - } else { - write_headers(&msg.head.headers, dst); - } + if let Some(orig_headers) = msg.head.extensions.get::() { + write_headers_original_case( + &msg.head.headers, + orig_headers, + dst, + msg.title_case_headers, + ); + } else if msg.title_case_headers { + write_headers_title_case(&msg.head.headers, dst); + } else { + write_headers(&msg.head.headers, dst); } extend(dst, b"\r\n"); @@ -1162,12 +1361,12 @@ fn write_headers(headers: &HeaderMap, dst: &mut Vec) { } } -#[cfg(feature = "ffi")] #[cold] fn write_headers_original_case( headers: &HeaderMap, - orig_case: &crate::ffi::HeaderCaseMap, + orig_case: &HeaderCaseMap, dst: &mut Vec, + title_case_headers: bool, ) { // For each header name/value pair, there may be a value in the casemap // that corresponds to the HeaderValue. 
So, we iterator all the keys, @@ -1175,11 +1374,13 @@ fn write_headers_original_case( // // TODO: consider adding http::HeaderMap::entries() iterator for name in headers.keys() { - let mut names = orig_case.get_all(name).iter(); + let mut names = orig_case.get_all(name); for value in headers.get_all(name) { if let Some(orig_name) = names.next() { - extend(dst, orig_name); + extend(dst, orig_name.as_ref()); + } else if title_case_headers { + title_case(dst, name.as_str().as_bytes()); } else { extend(dst, name.as_str().as_bytes()); } @@ -1233,7 +1434,6 @@ mod tests { cached_headers: &mut None, req_method: &mut method, h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }, @@ -1257,7 +1457,6 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }; @@ -1276,7 +1475,6 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }; @@ -1293,7 +1491,6 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: true, }; @@ -1312,7 +1509,6 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }; @@ -1335,7 +1531,6 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config, - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }; @@ -1355,13 +1550,45 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }; Client::parse(&mut raw, ctx).unwrap_err(); } + #[test] + fn test_parse_preserve_header_case_in_request() { + let mut raw = + BytesMut::from("GET / HTTP/1.1\r\nHost: hyper.rs\r\nX-BREAD: baguette\r\n\r\n"); + let ctx = ParseContext { + cached_headers: &mut None, + req_method: &mut None, + h1_parser_config: Default::default(), + preserve_header_case: true, + h09_responses: false, + }; + let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap(); + let orig_headers = parsed_message + .head + .extensions + .get::() + .unwrap(); + assert_eq!( + orig_headers + .get_all_internal(&HeaderName::from_static("host")) + .into_iter() + .collect::>(), + vec![&Bytes::from("Host")] + ); + assert_eq!( + orig_headers + .get_all_internal(&HeaderName::from_static("x-bread")) + .into_iter() + .collect::>(), + vec![&Bytes::from("X-BREAD")] + ); + } + #[test] fn test_decoder_request() { fn parse(s: &str) -> ParsedMessage { @@ -1372,7 +1599,6 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }, @@ -1389,7 +1615,6 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }, @@ -1605,7 +1830,6 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, 
h09_responses: false, } @@ -1622,7 +1846,6 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(m), h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }, @@ -1639,7 +1862,6 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }, @@ -1927,6 +2149,75 @@ mod tests { assert_eq!(vec, b"GET / HTTP/1.1\r\nContent-Length: 10\r\nContent-Type: application/json\r\n*-*: o_o\r\n\r\n".to_vec()); } + #[test] + fn test_client_request_encode_orig_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Client::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: false, + }, + &mut vec, + ) + .unwrap(); + + assert_eq!( + &*vec, + b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\n\r\n" + .as_ref(), + ); + } + #[test] + fn test_client_request_encode_orig_and_title_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Client::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + assert_eq!( + &*vec, + b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\n\r\n" + .as_ref(), + ); + } + #[test] fn test_server_encode_connect_method() { let mut head = MessageHead::default(); @@ -1947,6 +2238,104 @@ mod tests { assert!(encoder.is_last()); } + #[test] + fn test_server_response_encode_title_case() { + use crate::proto::BodyLength; + use http::header::HeaderValue; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: application/json\r\n"; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + + #[test] + fn test_server_response_encode_orig_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", 
HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: false, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\ndate: "; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + + #[test] + fn test_server_response_encode_orig_and_title_case() { + use crate::proto::BodyLength; + use http::header::{HeaderValue, CONTENT_LENGTH}; + + let mut head = MessageHead::default(); + head.headers + .insert("content-length", HeaderValue::from_static("10")); + head.headers + .insert("content-type", HeaderValue::from_static("application/json")); + + let mut orig_headers = HeaderCaseMap::default(); + orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); + head.extensions.insert(orig_headers); + + let mut vec = Vec::new(); + Server::encode( + Encode { + head: &mut head, + body: Some(BodyLength::Known(10)), + keep_alive: true, + req_method: &mut None, + title_case_headers: true, + }, + &mut vec, + ) + .unwrap(); + + let expected_response = + b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\nDate: "; + + assert_eq!(&vec[..expected_response.len()], &expected_response[..]); + } + #[test] fn parse_header_htabs() { let mut bytes = BytesMut::from("HTTP/1.1 200 OK\r\nserver: hello\tworld\r\n\r\n"); @@ -1956,7 +2345,6 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }, @@ -1967,17 +2355,16 @@ mod tests { assert_eq!(parsed.head.headers["server"], "hello\tworld"); } - #[cfg(feature = "ffi")] #[test] fn test_write_headers_orig_case_empty_value() { let mut headers = HeaderMap::new(); let name = http::header::HeaderName::from_static("x-empty"); headers.insert(&name, "".parse().expect("parse empty")); - let mut orig_cases = crate::ffi::HeaderCaseMap::default(); + let mut orig_cases = HeaderCaseMap::default(); orig_cases.insert(name, Bytes::from_static(b"X-EmptY")); let mut dst = Vec::new(); - super::write_headers_original_case(&headers, &orig_cases, &mut dst); + super::write_headers_original_case(&headers, &orig_cases, &mut dst, false); assert_eq!( dst, b"X-EmptY:\r\n", @@ -1985,7 +2372,6 @@ mod tests { ); } - #[cfg(feature = "ffi")] #[test] fn test_write_headers_orig_case_multiple_entries() { let mut headers = HeaderMap::new(); @@ -1993,12 +2379,12 @@ mod tests { headers.insert(&name, "a".parse().unwrap()); headers.append(&name, "b".parse().unwrap()); - let mut orig_cases = crate::ffi::HeaderCaseMap::default(); + let mut orig_cases = HeaderCaseMap::default(); orig_cases.insert(name.clone(), Bytes::from_static(b"X-Empty")); orig_cases.append(name, Bytes::from_static(b"X-EMPTY")); let mut dst = Vec::new(); - super::write_headers_original_case(&headers, &orig_cases, &mut dst); + super::write_headers_original_case(&headers, &orig_cases, &mut dst, false); assert_eq!(dst, b"X-Empty: a\r\nX-EMPTY: b\r\n"); } @@ -2039,7 +2425,6 @@ mod tests { cached_headers: &mut headers, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }, @@ 
-2076,7 +2461,6 @@ mod tests { cached_headers: &mut headers, req_method: &mut None, h1_parser_config: Default::default(), - #[cfg(feature = "ffi")] preserve_header_case: false, h09_responses: false, }, diff --git a/src/server/conn.rs b/src/server/conn.rs index 5137708fcb..0cef9d5e78 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -88,6 +88,7 @@ pub struct Http { exec: E, h1_half_close: bool, h1_keep_alive: bool, + h1_title_case_headers: bool, #[cfg(feature = "http2")] h2_builder: proto::h2::server::Config, mode: ConnectionMode, @@ -234,6 +235,7 @@ impl Http { exec: Exec::Default, h1_half_close: false, h1_keep_alive: true, + h1_title_case_headers: false, #[cfg(feature = "http2")] h2_builder: Default::default(), mode: ConnectionMode::default(), @@ -286,6 +288,19 @@ impl Http { self } + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Self { + self.h1_title_case_headers = enabled; + self + } + /// Sets whether HTTP2 is required. /// /// Default is false @@ -459,6 +474,7 @@ impl Http { exec, h1_half_close: self.h1_half_close, h1_keep_alive: self.h1_keep_alive, + h1_title_case_headers: self.h1_title_case_headers, #[cfg(feature = "http2")] h2_builder: self.h2_builder, mode: self.mode, @@ -514,6 +530,9 @@ impl Http { if self.h1_half_close { conn.set_allow_half_close(); } + if self.h1_title_case_headers { + conn.set_title_case_headers(); + } conn.set_flush_pipeline(self.pipeline_flush); if let Some(max) = self.max_buf_size { conn.set_max_buf_size(max); diff --git a/src/server/server.rs b/src/server/server.rs index 48cc6e2803..e02ab94b16 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -231,6 +231,19 @@ impl Builder { self } + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { + self.protocol.http1_title_case_headers(val); + self + } + /// Sets whether HTTP/1 is required. /// /// Default is `false`. 
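For reference, a minimal sketch of the server-side option added at the end of this patch, using the connection-level `Http` builder; everything other than `http1_title_case_headers` (the function name, the assumed `server` and `http1` features) is illustrative:

```rust
use hyper::server::conn::Http;

// Illustrative sketch; only `http1_title_case_headers` comes from the patch.
fn title_case_http() -> Http {
    let mut http = Http::new();
    // Write HTTP/1 header names in title case (`Content-Length`, `Date`, ...)
    // at the socket level; HTTP/2 output is unaffected.
    http.http1_title_case_headers(true);
    http
}
```

The higher-level `Server` builder gains an equivalent `http1_title_case_headers` setter in the same patch, so the option can also be set when building a full server.
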
From 7f69d8f94ccf258190f40585cd668e986b0829e4 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 21 Apr 2021 16:19:31 -0700 Subject: [PATCH 058/420] v0.14.6 --- CHANGELOG.md | 9 +++++++++ Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f52fb9f388..31251f09b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +### v0.14.6 (2021-04-21) + + +#### Features + +* **client:** add option to allow misplaced spaces in HTTP/1 responses (#2506) ([11345394](https://github.com/hyperium/hyper/commit/11345394d968d4817e1a0ee2550228ac0ae7ce74)) +* **http1:** add options to preserve header casing (#2480) ([dbea7716](https://github.com/hyperium/hyper/commit/dbea7716f157896bf7d2d417be7b4e382e7dc34f), closes [#2313](https://github.com/hyperium/hyper/issues/2313)) + + ### v0.14.5 (2021-03-26) diff --git a/Cargo.toml b/Cargo.toml index f80170c6cb..704d914646 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.5" # don't forget to update html_root_url +version = "0.14.6" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" From a303b3c329e6b8ecfa1da0b9b9e94736628167e0 Mon Sep 17 00:00:00 2001 From: ty Date: Thu, 22 Apr 2021 12:01:06 +0800 Subject: [PATCH 059/420] fix(http1): http1_title_case_headers should move Builder --- src/server/server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/server.rs b/src/server/server.rs index e02ab94b16..3fa949162c 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -239,7 +239,7 @@ impl Builder { /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { + pub fn http1_title_case_headers(mut self, val: bool) -> Self { self.protocol.http1_title_case_headers(val); self } From 4fd6c4cb0b58bb0831ae0f876d858aba1588d0e3 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Thu, 22 Apr 2021 09:13:51 +0200 Subject: [PATCH 060/420] feat(server): implement forgotten settings for case preserving --- examples/http_proxy.rs | 11 +++++++++-- src/server/conn.rs | 19 +++++++++++++++++++ src/server/server.rs | 13 +++++++++++++ 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs index a08bdc4a29..5911a2dbe8 100644 --- a/examples/http_proxy.rs +++ b/examples/http_proxy.rs @@ -23,14 +23,21 @@ type HttpClient = Client; #[tokio::main] async fn main() { let addr = SocketAddr::from(([127, 0, 0, 1], 8100)); - let client = HttpClient::new(); + + let client = Client::builder() + .http1_title_case_headers(true) + .http1_preserve_header_case(true) + .build_http(); let make_service = make_service_fn(move |_| { let client = client.clone(); async move { Ok::<_, Infallible>(service_fn(move |req| proxy(client.clone(), req))) } }); - let server = Server::bind(&addr).serve(make_service); + let server = Server::bind(&addr) + .http1_preserve_header_case(true) + .http1_title_case_headers(true) + .serve(make_service); println!("Listening on http://{}", addr); diff --git a/src/server/conn.rs b/src/server/conn.rs index 0cef9d5e78..c1a52f1c0d 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -89,6 +89,7 @@ pub struct Http { h1_half_close: bool, h1_keep_alive: bool, h1_title_case_headers: bool, + h1_preserve_header_case: bool, #[cfg(feature = "http2")] h2_builder: proto::h2::server::Config, mode: 
ConnectionMode, @@ -236,6 +237,7 @@ impl Http { h1_half_close: false, h1_keep_alive: true, h1_title_case_headers: false, + h1_preserve_header_case: false, #[cfg(feature = "http2")] h2_builder: Default::default(), mode: ConnectionMode::default(), @@ -301,6 +303,19 @@ impl Http { self } + /// Set whether HTTP/1 connections will write header names as provided + /// at the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Self { + self.h1_preserve_header_case = enabled; + self + } + /// Sets whether HTTP2 is required. /// /// Default is false @@ -475,6 +490,7 @@ impl Http { h1_half_close: self.h1_half_close, h1_keep_alive: self.h1_keep_alive, h1_title_case_headers: self.h1_title_case_headers, + h1_preserve_header_case: self.h1_preserve_header_case, #[cfg(feature = "http2")] h2_builder: self.h2_builder, mode: self.mode, @@ -533,6 +549,9 @@ impl Http { if self.h1_title_case_headers { conn.set_title_case_headers(); } + if self.h1_preserve_header_case { + conn.set_preserve_header_case(); + } conn.set_flush_pipeline(self.pipeline_flush); if let Some(max) = self.max_buf_size { conn.set_max_buf_size(max); diff --git a/src/server/server.rs b/src/server/server.rs index 3fa949162c..20c993e8ac 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -244,6 +244,19 @@ impl Builder { self } + /// Set whether HTTP/1 connections will write header names as provided + /// at the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_preserve_header_case(mut self, val: bool) -> Self { + self.protocol.http1_preserve_header_case(val); + self + } + /// Sets whether HTTP/1 is required. /// /// Default is `false`. From 963dc23f0dc84bbbd9341473f2a87c456670e135 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 22 Apr 2021 09:52:41 -0700 Subject: [PATCH 061/420] v0.14.7 --- CHANGELOG.md | 13 +++++++++++++ Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31251f09b3..c01ddf91f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +### v0.14.7 (2021-04-22) + + +#### Bug Fixes + +* **http1:** http1_title_case_headers should move Builder ([a303b3c3](https://github.com/hyperium/hyper/commit/a303b3c329e6b8ecfa1da0b9b9e94736628167e0)) + + +#### Features + +* **server:** implement forgotten settings for case preserving ([4fd6c4cb](https://github.com/hyperium/hyper/commit/4fd6c4cb0b58bb0831ae0f876d858aba1588d0e3)) + + ### v0.14.6 (2021-04-21) diff --git a/Cargo.toml b/Cargo.toml index 704d914646..4a94f06aa9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.6" # don't forget to update html_root_url +version = "0.14.7" # don't forget to update html_root_url description = "A fast and correct HTTP library." 
readme = "README.md" homepage = "https://hyper.rs" From 4e9a006498c7bdb5bb2ccb76a4c877f6da7e23b2 Mon Sep 17 00:00:00 2001 From: telotic Date: Wed, 28 Apr 2021 17:07:03 -0700 Subject: [PATCH 062/420] docs(example): support requests to domain names in example http_proxy (#2513) --- examples/http_proxy.rs | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs index 5911a2dbe8..bc56db1a20 100644 --- a/examples/http_proxy.rs +++ b/examples/http_proxy.rs @@ -3,8 +3,6 @@ use std::convert::Infallible; use std::net::SocketAddr; -use futures_util::future::try_join; - use hyper::service::{make_service_fn, service_fn}; use hyper::upgrade::Upgraded; use hyper::{Body, Client, Method, Request, Response, Server}; @@ -18,8 +16,8 @@ type HttpClient = Client; // 2. config http_proxy in command line // $ export http_proxy=http://127.0.0.1:8100 // $ export https_proxy=http://127.0.0.1:8100 -// 3. send requests (don't use a domain name) -// $ curl -i https://8.8.8.8 +// 3. send requests +// $ curl -i https://www.some_domain.com/ #[tokio::main] async fn main() { let addr = SocketAddr::from(([127, 0, 0, 1], 8100)); @@ -88,38 +86,25 @@ async fn proxy(client: HttpClient, req: Request) -> Result, } } -fn host_addr(uri: &http::Uri) -> Option { - uri.authority().and_then(|auth| auth.as_str().parse().ok()) +fn host_addr(uri: &http::Uri) -> Option { + uri.authority().and_then(|auth| Some(auth.to_string())) } // Create a TCP connection to host:port, build a tunnel between the connection and // the upgraded connection -async fn tunnel(upgraded: Upgraded, addr: SocketAddr) -> std::io::Result<()> { +async fn tunnel(mut upgraded: Upgraded, addr: String) -> std::io::Result<()> { // Connect to remote server let mut server = TcpStream::connect(addr).await?; // Proxying data - let amounts = { - let (mut server_rd, mut server_wr) = server.split(); - let (mut client_rd, mut client_wr) = tokio::io::split(upgraded); - - let client_to_server = tokio::io::copy(&mut client_rd, &mut server_wr); - let server_to_client = tokio::io::copy(&mut server_rd, &mut client_wr); - - try_join(client_to_server, server_to_client).await - }; + let (from_client, from_server) = + tokio::io::copy_bidirectional(&mut upgraded, &mut server).await?; // Print message when done - match amounts { - Ok((from_client, from_server)) => { - println!( - "client wrote {} bytes and received {} bytes", - from_client, from_server - ); - } - Err(e) => { - println!("tunnel error: {}", e); - } - }; + println!( + "client wrote {} bytes and received {} bytes", + from_client, from_server + ); + Ok(()) } From d1d2f32a7358c1c7d489ebbb98f4cbfdca9bb573 Mon Sep 17 00:00:00 2001 From: Mohsen Alizadeh Date: Mon, 3 May 2021 22:58:34 +0200 Subject: [PATCH 063/420] docs(headers): no_inline doc on HeaderMap (#2525) --- src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index e520051314..eb7c1730d0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -69,7 +69,10 @@ extern crate tracing; #[cfg(all(test, feature = "nightly"))] extern crate test; -pub use http::{header, HeaderMap, Method, Request, Response, StatusCode, Uri, Version}; +pub use crate::http::{header, Method, Request, Response, StatusCode, Uri, Version}; + +#[doc(no_inline)] +pub use crate::http::HeaderMap; pub use crate::body::Body; pub use crate::error::{Error, Result}; From e79d09396da955f235c267832312543d8230d867 Mon Sep 17 00:00:00 2001 From: David Pedersen Date: Thu, 6 May 2021 
22:48:53 +0200 Subject: [PATCH 064/420] docs(client): document the guarantees of cloning a `Client` (#2540) Small docs improvement to mention that cloning a `Client` is cheap and will reuse the connection pool. Fixes https://github.com/hyperium/hyper/issues/2239 --- src/client/client.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/client/client.rs b/src/client/client.rs index a219eb14ad..5545eab997 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -19,6 +19,9 @@ use crate::common::{exec::BoxSendFuture, lazy as hyper_lazy, task, Future, Lazy, use crate::rt::Executor; /// A Client to make outgoing HTTP requests. +/// +/// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The +/// underlying connection pool will be reused. #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Client { config: Config, From 8a05f8eec133793899c94dcbf1520eee3b91aa50 Mon Sep 17 00:00:00 2001 From: David Pedersen Date: Wed, 12 May 2021 01:56:01 +0200 Subject: [PATCH 065/420] docs(server): add bigger example to server module (#2539) It can sometimes be tricky to discover where to use `move` closures, `async move {}`, and `.clone()` when creating a server. This adds a slightly more bigger example that will hopefully help some. Fixes https://github.com/hyperium/hyper/issues/2446 --- src/server/mod.rs | 63 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/src/server/mod.rs b/src/server/mod.rs index 7647449adf..690c8127a7 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -16,7 +16,7 @@ //! //! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default. //! -//! ## Example +//! ## Examples //! //! ```no_run //! use std::convert::Infallible; @@ -84,6 +84,67 @@ //! # fn main() {} //! ``` //! +//! Passing data to your request handler can be done like so: +//! +//! ```no_run +//! use std::convert::Infallible; +//! use std::net::SocketAddr; +//! use hyper::{Body, Request, Response, Server}; +//! use hyper::service::{make_service_fn, service_fn}; +//! use hyper::server::conn::AddrStream; +//! +//! #[derive(Clone)] +//! struct AppContext { +//! // Whatever data your application needs can go here +//! } +//! +//! async fn handle( +//! context: AppContext, +//! addr: SocketAddr, +//! req: Request +//! ) -> Result, Infallible> { +//! Ok(Response::new(Body::from("Hello World"))) +//! } +//! +//! # #[cfg(feature = "runtime")] +//! #[tokio::main] +//! async fn main() { +//! let context = AppContext { +//! // ... +//! }; +//! +//! // A `MakeService` that produces a `Service` to handle each connection. +//! let make_service = make_service_fn(move |conn: &AddrStream| { +//! // We have to clone the context to share it with each invocation of +//! // `make_service`. If your data doesn't implement `Clone` consider using +//! // an `std::sync::Arc`. +//! let context = context.clone(); +//! +//! // You can grab the address of the incoming connection like so. +//! let addr = conn.remote_addr(); +//! +//! // Create a `Service` for responding to the request. +//! let service = service_fn(move |req| { +//! handle(context.clone(), addr, req) +//! }); +//! +//! // Return the service to hyper. +//! async move { Ok::<_, Infallible>(service) } +//! }); +//! +//! // Run the server like above... +//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); +//! +//! let server = Server::bind(&addr).serve(make_service); +//! +//! if let Err(e) = server.await { +//! 
eprintln!("server error: {}", e); +//! } +//! } +//! # #[cfg(not(feature = "runtime"))] +//! # fn main() {} +//! ``` +//! //! [`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html pub mod accept; From ccba59fb1b592dfdfca4b870e0922e5ba8244825 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 12 May 2021 17:51:45 -0700 Subject: [PATCH 066/420] docs(common): remove favicon doc from sync_wrapper module (#2548) --- src/common/sync_wrapper.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/common/sync_wrapper.rs b/src/common/sync_wrapper.rs index 05b11e2c4b..704d1a6712 100644 --- a/src/common/sync_wrapper.rs +++ b/src/common/sync_wrapper.rs @@ -1,11 +1,6 @@ /* * This is a copy of the sync_wrapper crate. */ -//! A mutual exclusion primitive that relies on static type information only -//! -//! This library is inspired by [this discussion](https://internals.rust-lang.org/t/what-shall-sync-mean-across-an-await/12020/2). -#![doc(html_logo_url = "https://developer.actyx.com/img/logo.svg")] -#![doc(html_favicon_url = "https://developer.actyx.com/img/favicon.ico")] /// A mutual exclusion primitive that relies on static type information only /// From b9916c410182c6225e857f0cded355ea1b74c865 Mon Sep 17 00:00:00 2001 From: boxdot Date: Thu, 13 May 2021 03:24:41 +0200 Subject: [PATCH 067/420] feat(client): allow to config http2 max concurrent reset streams (#2535) Setting streams to 0 makes h2 work on wasm platforms without a `Instant::now` implementation. --- src/client/client.rs | 15 +++++++++++++++ src/client/conn.rs | 18 +++++++++++++++++- src/proto/h2/client.rs | 7 ++++++- 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index 5545eab997..90d94e12ed 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -1151,6 +1151,21 @@ impl Builder { self } + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. + /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.conn_builder.http2_max_concurrent_reset_streams(max); + self + } + /// Set whether to retry requests that get disrupted before ever starting /// to write. /// diff --git a/src/client/conn.rs b/src/client/conn.rs index ef98fd30ce..70c1dad248 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -530,7 +530,8 @@ impl Builder { &mut self, enabled: bool, ) -> &mut Builder { - self.h1_parser_config.allow_spaces_after_header_name_in_responses(enabled); + self.h1_parser_config + .allow_spaces_after_header_name_in_responses(enabled); self } @@ -701,6 +702,21 @@ impl Builder { self } + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. 
+ /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_concurrent_reset_streams = Some(max); + self + } + /// Constructs a connection with the configured options and IO. pub fn handshake( &self, diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs index 4f583f2bfa..6d310e94c9 100644 --- a/src/proto/h2/client.rs +++ b/src/proto/h2/client.rs @@ -10,7 +10,7 @@ use tokio::io::{AsyncRead, AsyncWrite}; use super::{decode_content_length, ping, PipeToSendStream, SendBuf}; use crate::body::HttpBody; -use crate::common::{task, exec::Exec, Future, Never, Pin, Poll}; +use crate::common::{exec::Exec, task, Future, Never, Pin, Poll}; use crate::headers; use crate::proto::Dispatched; use crate::{Body, Request, Response}; @@ -44,6 +44,7 @@ pub(crate) struct Config { pub(crate) keep_alive_timeout: Duration, #[cfg(feature = "runtime")] pub(crate) keep_alive_while_idle: bool, + pub(crate) max_concurrent_reset_streams: Option, } impl Default for Config { @@ -59,6 +60,7 @@ impl Default for Config { keep_alive_timeout: Duration::from_secs(20), #[cfg(feature = "runtime")] keep_alive_while_idle: false, + max_concurrent_reset_streams: None, } } } @@ -70,6 +72,9 @@ fn new_builder(config: &Config) -> Builder { .initial_connection_window_size(config.initial_conn_window_size) .max_frame_size(config.max_frame_size) .enable_push(false); + if let Some(max) = config.max_concurrent_reset_streams { + builder.max_concurrent_reset_streams(max); + } builder } From 960a69a5878ede82c56f50ac1444a9e75e885a8f Mon Sep 17 00:00:00 2001 From: "Adam C. Foltzer" Date: Wed, 12 May 2021 18:30:28 -0700 Subject: [PATCH 068/420] feat(error): add `Error::is_parse_too_large` and `Error::is_parse_status` methods (#2538) The discussion in #2462 opened up some larger questions about more comprehensive approaches to the error API, with the agreement that additional methods would be desirable in the short term. These methods address an immediate need of our customers, so I would like to get them in first before we flesh out a future solution. One potentially controversial choice here is to still return `true` from `is_parse_error()` for these variants. I hope the naming of the methods make it clear that the new predicates are refinements of the existing one, but I didn't want to change the behavior of `is_parse_error()` which would require a major version bump. --- src/error.rs | 11 +++++++ tests/client.rs | 77 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/src/error.rs b/src/error.rs index 663156e0a9..6912cd3a70 100644 --- a/src/error.rs +++ b/src/error.rs @@ -132,6 +132,17 @@ impl Error { matches!(self.inner.kind, Kind::Parse(_)) } + /// Returns true if this was an HTTP parse error caused by a message that was too large. + pub fn is_parse_too_large(&self) -> bool { + matches!(self.inner.kind, Kind::Parse(Parse::TooLarge)) + } + + /// Returns true if this was an HTTP parse error caused by an invalid response status code or + /// reason phrase. + pub fn is_parse_status(&self) -> bool { + matches!(self.inner.kind, Kind::Parse(Parse::Status)) + } + /// Returns true if this error was caused by user code. 
pub fn is_user(&self) -> bool { matches!(self.inner.kind, Kind::User(_)) diff --git a/tests/client.rs b/tests/client.rs index d22f8cf2ca..52747a30c6 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -907,6 +907,83 @@ test! { } +test! { + name: client_error_parse_too_large, + + server: + expected: "\ + GET /err HTTP/1.1\r\n\ + host: {addr}\r\n\ + \r\n\ + ", + reply: { + let long_header = std::iter::repeat("A").take(500_000).collect::(); + format!("\ + HTTP/1.1 200 OK\r\n\ + {}: {}\r\n\ + \r\n\ + ", + long_header, + long_header, + ) + }, + + client: + request: { + method: GET, + url: "http://{addr}/err", + }, + // should get a Parse(TooLarge) error + error: |err| err.is_parse() && err.is_parse_too_large(), + +} + +test! { + name: client_error_parse_status_out_of_range, + + server: + expected: "\ + GET /err HTTP/1.1\r\n\ + host: {addr}\r\n\ + \r\n\ + ", + reply: "\ + HTTP/1.1 001 OK\r\n\ + \r\n\ + ", + + client: + request: { + method: GET, + url: "http://{addr}/err", + }, + // should get a Parse(Status) error + error: |err| err.is_parse() && err.is_parse_status(), +} + +test! { + name: client_error_parse_status_syntactically_invalid, + + server: + expected: "\ + GET /err HTTP/1.1\r\n\ + host: {addr}\r\n\ + \r\n\ + ", + reply: "\ + HTTP/1.1 1 OK\r\n\ + \r\n\ + ", + + client: + request: { + method: GET, + url: "http://{addr}/err", + }, + // should get a Parse(Status) error + error: |err| err.is_parse() && err.is_parse_status(), +} + test! { name: client_100_continue, From 4cd06bf25661d7e43e2fdf43eabdb8508055cf3a Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 18 May 2021 14:12:15 -0700 Subject: [PATCH 069/420] perf(http2): slow adaptive window pings as the BDP stabilizes (#2550) This introduces a delay to sending a ping to calculate the BDP that becomes shorter as the BDP is changing, to improve throughput quickly, but then also becomes longer as the BDP stabilizes, to reduce the amount of pings sent. This improved the performance of the adaptive window end_to_end benchmark. It should also reduce the amount of pings the remote has to deal with, hopefully preventing hyper from triggering ENHANCE_YOUR_CALM errors. --- src/proto/h2/ping.rs | 82 ++++++++++++++++++++++++++++++++------------ 1 file changed, 60 insertions(+), 22 deletions(-) diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs index 105fc69a39..3ff45cae1e 100644 --- a/src/proto/h2/ping.rs +++ b/src/proto/h2/ping.rs @@ -51,9 +51,15 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) bdp: wnd, max_bandwidth: 0.0, rtt: 0.0, + ping_delay: Duration::from_millis(100), + stable_count: 0, }); - let bytes = bdp.as_ref().map(|_| 0); + let (bytes, next_bdp_at) = if bdp.is_some() { + (Some(0), Some(Instant::now())) + } else { + (None, None) + }; #[cfg(feature = "runtime")] let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive { @@ -75,6 +81,7 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) is_keep_alive_timed_out: false, ping_pong, ping_sent_at: None, + next_bdp_at, })); ( @@ -125,6 +132,9 @@ struct Shared { /// If `Some`, bdp is enabled, and this tracks how many bytes have been /// read during the current sample. bytes: Option, + /// We delay a variable amount of time between BDP pings. This allows us + /// to send less pings as the bandwidth stabilizes. 
+ next_bdp_at: Option, // keep-alive /// If `Some`, keep-alive is enabled, and the Instant is how long ago @@ -143,6 +153,12 @@ struct Bdp { max_bandwidth: f64, /// Round trip time in seconds rtt: f64, + /// Delay the next ping by this amount. + /// + /// This will change depending on how stable the current bandwidth is. + ping_delay: Duration, + /// The count of ping round trips where BDP has stayed the same. + stable_count: u32, } #[cfg(feature = "runtime")] @@ -207,6 +223,17 @@ impl Recorder { #[cfg(feature = "runtime")] locked.update_last_read_at(); + // are we ready to send another bdp ping? + // if not, we don't need to record bytes either + + if let Some(ref next_bdp_at) = locked.next_bdp_at { + if Instant::now() < *next_bdp_at { + return; + } else { + locked.next_bdp_at = None; + } + } + if let Some(ref mut bytes) = locked.bytes { *bytes += len; } else { @@ -265,6 +292,7 @@ impl Recorder { impl Ponger { pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll { + let now = Instant::now(); let mut locked = self.shared.lock().unwrap(); #[cfg(feature = "runtime")] let is_idle = self.is_idle(); @@ -282,13 +310,13 @@ impl Ponger { return Poll::Pending; } - let (bytes, rtt) = match locked.ping_pong.poll_pong(cx) { + match locked.ping_pong.poll_pong(cx) { Poll::Ready(Ok(_pong)) => { - let rtt = locked + let start = locked .ping_sent_at - .expect("pong received implies ping_sent_at") - .elapsed(); + .expect("pong received implies ping_sent_at"); locked.ping_sent_at = None; + let rtt = now - start; trace!("recv pong"); #[cfg(feature = "runtime")] @@ -299,19 +327,20 @@ impl Ponger { } } - if self.bdp.is_some() { + if let Some(ref mut bdp) = self.bdp { let bytes = locked.bytes.expect("bdp enabled implies bytes"); locked.bytes = Some(0); // reset trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt); - (bytes, rtt) - } else { - // no bdp, done! - return Poll::Pending; + + let update = bdp.calculate(bytes, rtt); + locked.next_bdp_at = Some(now + bdp.ping_delay); + if let Some(update) = update { + return Poll::Ready(Ponged::SizeUpdate(update)) + } } } Poll::Ready(Err(e)) => { debug!("pong error: {}", e); - return Poll::Pending; } Poll::Pending => { #[cfg(feature = "runtime")] @@ -324,19 +353,11 @@ impl Ponger { } } } - - return Poll::Pending; } - }; - - drop(locked); - - if let Some(bdp) = self.bdp.as_mut().and_then(|bdp| bdp.calculate(bytes, rtt)) { - Poll::Ready(Ponged::SizeUpdate(bdp)) - } else { - // XXX: this doesn't register a waker...? - Poll::Pending } + + // XXX: this doesn't register a waker...? + Poll::Pending } #[cfg(feature = "runtime")] @@ -386,6 +407,7 @@ impl Bdp { fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option { // No need to do any math if we're at the limit. 
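        // (Even when the sample is discarded here, the stabilize_delay() call
        //  added below keeps lengthening the gap between BDP pings, so a
        //  connection already sitting at BDP_LIMIT stops probing frequently.)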
if self.bdp as usize == BDP_LIMIT { + self.stabilize_delay(); return None; } @@ -405,6 +427,7 @@ impl Bdp { if bw < self.max_bandwidth { // not a faster bandwidth, so don't update + self.stabilize_delay(); return None; } else { self.max_bandwidth = bw; @@ -415,11 +438,26 @@ impl Bdp { if bytes >= self.bdp as usize * 2 / 3 { self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize; trace!("BDP increased to {}", self.bdp); + + self.stable_count = 0; + self.ping_delay /= 2; Some(self.bdp) } else { + self.stabilize_delay(); None } } + + fn stabilize_delay(&mut self) { + if self.ping_delay < Duration::from_secs(10) { + self.stable_count += 1; + + if self.stable_count >= 2 { + self.ping_delay *= 4; + self.stable_count = 0; + } + } + } } fn seconds(dur: Duration) -> f64 { From be9677a1e782d33c4402772e0fc4ef0a4c49d507 Mon Sep 17 00:00:00 2001 From: Geoffry Song Date: Wed, 19 May 2021 10:01:20 -0700 Subject: [PATCH 070/420] feat(http2): allow HTTP/2 requests by ALPN when http2_only is unset (#2527) --- src/client/client.rs | 13 ++++++++----- tests/client.rs | 12 +++++++++++- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index 90d94e12ed..3b6a0d9f31 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -170,11 +170,7 @@ where ))); } } - other_h2 @ Version::HTTP_2 => { - if self.config.ver != Ver::Http2 { - return ResponseFuture::error_version(other_h2); - } - } + Version::HTTP_2 => (), // completely unsupported HTTP version (like HTTP/0.9)! other => return ResponseFuture::error_version(other), }; @@ -230,6 +226,13 @@ where let mut pooled = self.connection_for(pool_key).await?; if pooled.is_http1() { + if req.version() == Version::HTTP_2 { + warn!("Connection is HTTP/1, but request requires HTTP/2"); + return Err(ClientError::Normal( + crate::Error::new_user_unsupported_version(), + )); + } + if self.config.set_host { let uri = req.uri().clone(); req.headers_mut().entry(HOST).or_insert_with(|| { diff --git a/tests/client.rs b/tests/client.rs index 52747a30c6..978f79a1d1 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -2116,9 +2116,19 @@ mod dispatch_impl { // so the unwrapped responses futures show it still worked. 
assert_eq!(connects.load(Ordering::SeqCst), 3); - let res4 = client.get(url); + let res4 = client.get(url.clone()); rt.block_on(res4).unwrap(); + // HTTP/2 request allowed + let res5 = client.request( + Request::builder() + .uri(url) + .version(hyper::Version::HTTP_2) + .body(Default::default()) + .unwrap(), + ); + rt.block_on(res5).unwrap(); + assert_eq!( connects.load(Ordering::SeqCst), 3, From 5442b6faddaff9aeb7c073031a3b7aa4497fda4d Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Mon, 24 May 2021 20:20:44 +0200 Subject: [PATCH 071/420] feat(http2): Implement Client-side CONNECT support over HTTP/2 (#2523) Closes #2508 --- Cargo.toml | 2 +- src/body/length.rs | 11 ++ src/client/client.rs | 7 +- src/error.rs | 6 +- src/proto/h2/client.rs | 121 +++++++++++---- src/proto/h2/mod.rs | 208 ++++++++++++++++++++++--- src/proto/h2/server.rs | 88 +++++++++-- src/upgrade.rs | 9 +- tests/client.rs | 123 ++++++++++++++- tests/server.rs | 336 +++++++++++++++++++++++++++++++++++++++++ 10 files changed, 833 insertions(+), 78 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4a94f06aa9..93624a1ca6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,7 @@ http = "0.2" http-body = "0.4" httpdate = "1.0" httparse = "1.4" -h2 = { version = "0.3", optional = true } +h2 = { version = "0.3.3", optional = true } itoa = "0.4.1" tracing = { version = "0.1", default-features = false, features = ["std"] } pin-project = "1.0" diff --git a/src/body/length.rs b/src/body/length.rs index aa9cf3dcd5..633a911fb2 100644 --- a/src/body/length.rs +++ b/src/body/length.rs @@ -3,6 +3,17 @@ use std::fmt; #[derive(Clone, Copy, PartialEq, Eq)] pub(crate) struct DecodedLength(u64); +#[cfg(any(feature = "http1", feature = "http2"))] +impl From> for DecodedLength { + fn from(len: Option) -> Self { + len.and_then(|len| { + // If the length is u64::MAX, oh well, just reported chunked. + Self::checked_new(len).ok() + }) + .unwrap_or(DecodedLength::CHUNKED) + } +} + #[cfg(any(feature = "http1", feature = "http2", test))] const MAX_LEN: u64 = std::u64::MAX - 2; diff --git a/src/client/client.rs b/src/client/client.rs index 3b6a0d9f31..a5d8dcfaf7 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -254,12 +254,9 @@ where absolute_form(req.uri_mut()); } else { origin_form(req.uri_mut()); - }; + } } else if req.method() == Method::CONNECT { - debug!("client does not support CONNECT requests over HTTP2"); - return Err(ClientError::Normal( - crate::Error::new_user_unsupported_request_method(), - )); + authority_form(req.uri_mut()); } let fut = pooled diff --git a/src/error.rs b/src/error.rs index 6912cd3a70..dd577b99a6 100644 --- a/src/error.rs +++ b/src/error.rs @@ -90,7 +90,7 @@ pub(super) enum User { /// User tried to send a certain header in an unexpected context. /// /// For example, sending both `content-length` and `transfer-encoding`. - #[cfg(feature = "http1")] + #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] UnexpectedHeader, /// User tried to create a Request with bad version. 
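To put the new client-side CONNECT path in context, a rough usage sketch against the `client::conn` API (the peer address and tunnel payload are illustrative, and the plain TCP stream could just as well be a TLS stream negotiated via ALPN):

```
use hyper::client::conn;
use hyper::{Body, Method, Request};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

async fn connect_tunnel() -> Result<(), Box<dyn std::error::Error>> {
    let io = TcpStream::connect("127.0.0.1:8080").await?;
    let (mut sender, connection) = conn::Builder::new()
        .http2_only(true)
        .handshake::<_, Body>(io)
        .await?;
    // The connection task has to be polled for the request and upgrade to progress.
    tokio::spawn(connection);

    let req = Request::builder()
        .method(Method::CONNECT)
        .uri("example.com:443")
        .body(Body::empty())?;
    let res = sender.send_request(req).await?;

    // On a 200 response the stream becomes a raw byte tunnel.
    let mut tunnel = hyper::upgrade::on(res).await?;
    tunnel.write_all(b"ping").await?;
    let mut reply = vec![];
    tunnel.read_to_end(&mut reply).await?;
    Ok(())
}
```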
@@ -290,7 +290,7 @@ impl Error { Error::new(Kind::User(user)) } - #[cfg(feature = "http1")] + #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] pub(super) fn new_user_header() -> Error { Error::new_user(User::UnexpectedHeader) @@ -405,7 +405,7 @@ impl Error { Kind::User(User::MakeService) => "error from user's MakeService", #[cfg(any(feature = "http1", feature = "http2"))] Kind::User(User::Service) => "error from user's Service", - #[cfg(feature = "http1")] + #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] Kind::User(User::UnexpectedHeader) => "user sent unexpected header", #[cfg(any(feature = "http1", feature = "http2"))] diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs index 6d310e94c9..3692a8f253 100644 --- a/src/proto/h2/client.rs +++ b/src/proto/h2/client.rs @@ -2,17 +2,21 @@ use std::error::Error as StdError; #[cfg(feature = "runtime")] use std::time::Duration; +use bytes::Bytes; use futures_channel::{mpsc, oneshot}; use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; use futures_util::stream::StreamExt as _; use h2::client::{Builder, SendRequest}; +use http::{Method, StatusCode}; use tokio::io::{AsyncRead, AsyncWrite}; -use super::{decode_content_length, ping, PipeToSendStream, SendBuf}; +use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; use crate::body::HttpBody; use crate::common::{exec::Exec, task, Future, Never, Pin, Poll}; use crate::headers; +use crate::proto::h2::UpgradedSendStream; use crate::proto::Dispatched; +use crate::upgrade::Upgraded; use crate::{Body, Request, Response}; type ClientRx = crate::client::dispatch::Receiver, Response>; @@ -233,8 +237,25 @@ where headers::set_content_length_if_missing(req.headers_mut(), len); } } + + let is_connect = req.method() == Method::CONNECT; let eos = body.is_end_stream(); - let (fut, body_tx) = match self.h2_tx.send_request(req, eos) { + let ping = self.ping.clone(); + + if is_connect { + if headers::content_length_parse_all(req.headers()) + .map_or(false, |len| len != 0) + { + warn!("h2 connect request with non-zero body not supported"); + cb.send(Err(( + crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + None, + ))); + continue; + } + } + + let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) { Ok(ok) => ok, Err(err) => { debug!("client send request error: {}", err); @@ -243,45 +264,81 @@ where } }; - let ping = self.ping.clone(); - if !eos { - let mut pipe = Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| { - if let Err(e) = res { - debug!("client request body error: {}", e); - } - }); - - // eagerly see if the body pipe is ready and - // can thus skip allocating in the executor - match Pin::new(&mut pipe).poll(cx) { - Poll::Ready(_) => (), - Poll::Pending => { - let conn_drop_ref = self.conn_drop_ref.clone(); - // keep the ping recorder's knowledge of an - // "open stream" alive while this body is - // still sending... 
- let ping = ping.clone(); - let pipe = pipe.map(move |x| { - drop(conn_drop_ref); - drop(ping); - x + let send_stream = if !is_connect { + if !eos { + let mut pipe = + Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| { + if let Err(e) = res { + debug!("client request body error: {}", e); + } }); - self.executor.execute(pipe); + + // eagerly see if the body pipe is ready and + // can thus skip allocating in the executor + match Pin::new(&mut pipe).poll(cx) { + Poll::Ready(_) => (), + Poll::Pending => { + let conn_drop_ref = self.conn_drop_ref.clone(); + // keep the ping recorder's knowledge of an + // "open stream" alive while this body is + // still sending... + let ping = ping.clone(); + let pipe = pipe.map(move |x| { + drop(conn_drop_ref); + drop(ping); + x + }); + self.executor.execute(pipe); + } } } - } + + None + } else { + Some(body_tx) + }; let fut = fut.map(move |result| match result { Ok(res) => { // record that we got the response headers ping.record_non_data(); - let content_length = decode_content_length(res.headers()); - let res = res.map(|stream| { - let ping = ping.for_stream(&stream); - crate::Body::h2(stream, content_length, ping) - }); - Ok(res) + let content_length = headers::content_length_parse_all(res.headers()); + if let (Some(mut send_stream), StatusCode::OK) = + (send_stream, res.status()) + { + if content_length.map_or(false, |len| len != 0) { + warn!("h2 connect response with non-zero body not supported"); + + send_stream.send_reset(h2::Reason::INTERNAL_ERROR); + return Err(( + crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + None, + )); + } + let (parts, recv_stream) = res.into_parts(); + let mut res = Response::from_parts(parts, Body::empty()); + + let (pending, on_upgrade) = crate::upgrade::pending(); + let io = H2Upgraded { + ping, + send_stream: unsafe { UpgradedSendStream::new(send_stream) }, + recv_stream, + buf: Bytes::new(), + }; + let upgraded = Upgraded::new(io, Bytes::new()); + + pending.fulfill(upgraded); + res.extensions_mut().insert(on_upgrade); + + Ok(res) + } else { + let res = res.map(|stream| { + let ping = ping.for_stream(&stream); + crate::Body::h2(stream, content_length.into(), ping) + }); + Ok(res) + } } Err(err) => { ping.ensure_not_timed_out().map_err(|e| (e, None))?; diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index cf06592903..0dbcc8d466 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -1,5 +1,5 @@ -use bytes::Buf; -use h2::SendStream; +use bytes::{Buf, Bytes}; +use h2::{RecvStream, SendStream}; use http::header::{ HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE, @@ -7,11 +7,14 @@ use http::header::{ use http::HeaderMap; use pin_project::pin_project; use std::error::Error as StdError; -use std::io::IoSlice; +use std::io::{self, Cursor, IoSlice}; +use std::mem; +use std::task::Context; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use crate::body::{DecodedLength, HttpBody}; +use crate::body::HttpBody; use crate::common::{task, Future, Pin, Poll}; -use crate::headers::content_length_parse_all; +use crate::proto::h2::ping::Recorder; pub(crate) mod ping; @@ -83,15 +86,6 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { } } -fn decode_content_length(headers: &HeaderMap) -> DecodedLength { - if let Some(len) = content_length_parse_all(headers) { - // If the length is u64::MAX, oh well, just reported chunked. 
- DecodedLength::checked_new(len).unwrap_or_else(|_| DecodedLength::CHUNKED) - } else { - DecodedLength::CHUNKED - } -} - // body adapters used by both Client and Server #[pin_project] @@ -172,7 +166,7 @@ where is_eos, ); - let buf = SendBuf(Some(chunk)); + let buf = SendBuf::Buf(chunk); me.body_tx .send_data(buf, is_eos) .map_err(crate::Error::new_body_write)?; @@ -243,32 +237,202 @@ impl SendStreamExt for SendStream> { fn send_eos_frame(&mut self) -> crate::Result<()> { trace!("send body eos"); - self.send_data(SendBuf(None), true) + self.send_data(SendBuf::None, true) .map_err(crate::Error::new_body_write) } } -struct SendBuf(Option); +#[repr(usize)] +enum SendBuf { + Buf(B), + Cursor(Cursor>), + None, +} impl Buf for SendBuf { #[inline] fn remaining(&self) -> usize { - self.0.as_ref().map(|b| b.remaining()).unwrap_or(0) + match *self { + Self::Buf(ref b) => b.remaining(), + Self::Cursor(ref c) => c.remaining(), + Self::None => 0, + } } #[inline] fn chunk(&self) -> &[u8] { - self.0.as_ref().map(|b| b.chunk()).unwrap_or(&[]) + match *self { + Self::Buf(ref b) => b.chunk(), + Self::Cursor(ref c) => c.chunk(), + Self::None => &[], + } } #[inline] fn advance(&mut self, cnt: usize) { - if let Some(b) = self.0.as_mut() { - b.advance(cnt) + match *self { + Self::Buf(ref mut b) => b.advance(cnt), + Self::Cursor(ref mut c) => c.advance(cnt), + Self::None => {} } } fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { - self.0.as_ref().map(|b| b.chunks_vectored(dst)).unwrap_or(0) + match *self { + Self::Buf(ref b) => b.chunks_vectored(dst), + Self::Cursor(ref c) => c.chunks_vectored(dst), + Self::None => 0, + } + } +} + +struct H2Upgraded +where + B: Buf, +{ + ping: Recorder, + send_stream: UpgradedSendStream, + recv_stream: RecvStream, + buf: Bytes, +} + +impl AsyncRead for H2Upgraded +where + B: Buf, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + read_buf: &mut ReadBuf<'_>, + ) -> Poll> { + if self.buf.is_empty() { + self.buf = loop { + match ready!(self.recv_stream.poll_data(cx)) { + None => return Poll::Ready(Ok(())), + Some(Ok(buf)) if buf.is_empty() && !self.recv_stream.is_end_stream() => { + continue + } + Some(Ok(buf)) => { + self.ping.record_data(buf.len()); + break buf; + } + Some(Err(e)) => { + return Poll::Ready(Err(h2_to_io_error(e))); + } + } + }; + } + let cnt = std::cmp::min(self.buf.len(), read_buf.remaining()); + read_buf.put_slice(&self.buf[..cnt]); + self.buf.advance(cnt); + let _ = self.recv_stream.flow_control().release_capacity(cnt); + Poll::Ready(Ok(())) + } +} + +impl AsyncWrite for H2Upgraded +where + B: Buf, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + if let Poll::Ready(reset) = self.send_stream.poll_reset(cx) { + return Poll::Ready(Err(h2_to_io_error(match reset { + Ok(reason) => reason.into(), + Err(e) => e, + }))); + } + if buf.is_empty() { + return Poll::Ready(Ok(0)); + } + self.send_stream.reserve_capacity(buf.len()); + Poll::Ready(match ready!(self.send_stream.poll_capacity(cx)) { + None => Ok(0), + Some(Ok(cnt)) => self.send_stream.write(&buf[..cnt], false).map(|()| cnt), + Some(Err(e)) => Err(h2_to_io_error(e)), + }) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(self.send_stream.write(&[], true)) + } +} + +fn h2_to_io_error(e: h2::Error) -> io::Error { + if e.is_io() { + e.into_io().unwrap() + } else { + 
io::Error::new(io::ErrorKind::Other, e) + } +} + +struct UpgradedSendStream(SendStream>>); + +impl UpgradedSendStream +where + B: Buf, +{ + unsafe fn new(inner: SendStream>) -> Self { + assert_eq!(mem::size_of::(), mem::size_of::>()); + Self(mem::transmute(inner)) + } + + fn reserve_capacity(&mut self, cnt: usize) { + unsafe { self.as_inner_unchecked().reserve_capacity(cnt) } + } + + fn poll_capacity(&mut self, cx: &mut Context<'_>) -> Poll>> { + unsafe { self.as_inner_unchecked().poll_capacity(cx) } + } + + fn poll_reset(&mut self, cx: &mut Context<'_>) -> Poll> { + unsafe { self.as_inner_unchecked().poll_reset(cx) } + } + + fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> { + let send_buf = SendBuf::Cursor(Cursor::new(buf.into())); + unsafe { + self.as_inner_unchecked() + .send_data(send_buf, end_of_stream) + .map_err(h2_to_io_error) + } + } + + unsafe fn as_inner_unchecked(&mut self) -> &mut SendStream> { + &mut *(&mut self.0 as *mut _ as *mut _) + } +} + +#[repr(transparent)] +struct Neutered { + _inner: B, + impossible: Impossible, +} + +enum Impossible {} + +unsafe impl Send for Neutered {} + +impl Buf for Neutered { + fn remaining(&self) -> usize { + match self.impossible {} + } + + fn chunk(&self) -> &[u8] { + match self.impossible {} + } + + fn advance(&mut self, _cnt: usize) { + match self.impossible {} } } diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index eea52e3e4b..de77eaa232 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -3,19 +3,24 @@ use std::marker::Unpin; #[cfg(feature = "runtime")] use std::time::Duration; +use bytes::Bytes; use h2::server::{Connection, Handshake, SendResponse}; -use h2::Reason; +use h2::{Reason, RecvStream}; +use http::{Method, Request}; use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; -use super::{decode_content_length, ping, PipeToSendStream, SendBuf}; +use super::{ping, PipeToSendStream, SendBuf}; use crate::body::HttpBody; use crate::common::exec::ConnStreamExec; use crate::common::{date, task, Future, Pin, Poll}; use crate::headers; +use crate::proto::h2::ping::Recorder; +use crate::proto::h2::{H2Upgraded, UpgradedSendStream}; use crate::proto::Dispatched; use crate::service::HttpService; +use crate::upgrade::{OnUpgrade, Pending, Upgraded}; use crate::{Body, Response}; // Our defaults are chosen for the "majority" case, which usually are not @@ -255,9 +260,9 @@ where // When the service is ready, accepts an incoming request. 
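            // (For CONNECT requests, the hunk below swaps the request body out for an
            //  OnUpgrade extension: once the service answers with a 2xx, the h2 stream
            //  itself is handed back to the caller as a raw tunnel.)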
match ready!(self.conn.poll_accept(cx)) { - Some(Ok((req, respond))) => { + Some(Ok((req, mut respond))) => { trace!("incoming request"); - let content_length = decode_content_length(req.headers()); + let content_length = headers::content_length_parse_all(req.headers()); let ping = self .ping .as_ref() @@ -267,8 +272,36 @@ where // Record the headers received ping.record_non_data(); - let req = req.map(|stream| crate::Body::h2(stream, content_length, ping)); - let fut = H2Stream::new(service.call(req), respond); + let is_connect = req.method() == Method::CONNECT; + let (mut parts, stream) = req.into_parts(); + let (req, connect_parts) = if !is_connect { + ( + Request::from_parts( + parts, + crate::Body::h2(stream, content_length.into(), ping), + ), + None, + ) + } else { + if content_length.map_or(false, |len| len != 0) { + warn!("h2 connect request with non-zero body not supported"); + respond.send_reset(h2::Reason::INTERNAL_ERROR); + return Poll::Ready(Ok(())); + } + let (pending, upgrade) = crate::upgrade::pending(); + debug_assert!(parts.extensions.get::().is_none()); + parts.extensions.insert(upgrade); + ( + Request::from_parts(parts, crate::Body::empty()), + Some(ConnectParts { + pending, + ping, + recv_stream: stream, + }), + ) + }; + + let fut = H2Stream::new(service.call(req), connect_parts, respond); exec.execute_h2stream(fut); } Some(Err(e)) => { @@ -331,18 +364,28 @@ enum H2StreamState where B: HttpBody, { - Service(#[pin] F), + Service(#[pin] F, Option), Body(#[pin] PipeToSendStream), } +struct ConnectParts { + pending: Pending, + ping: Recorder, + recv_stream: RecvStream, +} + impl H2Stream where B: HttpBody, { - fn new(fut: F, respond: SendResponse>) -> H2Stream { + fn new( + fut: F, + connect_parts: Option, + respond: SendResponse>, + ) -> H2Stream { H2Stream { reply: respond, - state: H2StreamState::Service(fut), + state: H2StreamState::Service(fut, connect_parts), } } } @@ -364,6 +407,7 @@ impl H2Stream where F: Future, E>>, B: HttpBody, + B::Data: 'static, B::Error: Into>, E: Into>, { @@ -371,7 +415,7 @@ where let mut me = self.project(); loop { let next = match me.state.as_mut().project() { - H2StreamStateProj::Service(h) => { + H2StreamStateProj::Service(h, connect_parts) => { let res = match h.poll(cx) { Poll::Ready(Ok(r)) => r, Poll::Pending => { @@ -402,6 +446,29 @@ where .entry(::http::header::DATE) .or_insert_with(date::update_and_header_value); + if let Some(connect_parts) = connect_parts.take() { + if res.status().is_success() { + if headers::content_length_parse_all(res.headers()) + .map_or(false, |len| len != 0) + { + warn!("h2 successful response to CONNECT request with body not supported"); + me.reply.send_reset(h2::Reason::INTERNAL_ERROR); + return Poll::Ready(Err(crate::Error::new_user_header())); + } + let send_stream = reply!(me, res, false); + connect_parts.pending.fulfill(Upgraded::new( + H2Upgraded { + ping: connect_parts.ping, + recv_stream: connect_parts.recv_stream, + send_stream: unsafe { UpgradedSendStream::new(send_stream) }, + buf: Bytes::new(), + }, + Bytes::new(), + )); + return Poll::Ready(Ok(())); + } + } + // automatically set Content-Length from body... 
if let Some(len) = body.size_hint().exact() { headers::set_content_length_if_missing(res.headers_mut(), len); @@ -428,6 +495,7 @@ impl Future for H2Stream where F: Future, E>>, B: HttpBody, + B::Data: 'static, B::Error: Into>, E: Into>, { diff --git a/src/upgrade.rs b/src/upgrade.rs index 6004c1a31a..efab10a6fc 100644 --- a/src/upgrade.rs +++ b/src/upgrade.rs @@ -62,12 +62,12 @@ pub fn on(msg: T) -> OnUpgrade { msg.on_upgrade() } -#[cfg(feature = "http1")] +#[cfg(any(feature = "http1", feature = "http2"))] pub(super) struct Pending { tx: oneshot::Sender>, } -#[cfg(feature = "http1")] +#[cfg(any(feature = "http1", feature = "http2"))] pub(super) fn pending() -> (Pending, OnUpgrade) { let (tx, rx) = oneshot::channel(); (Pending { tx }, OnUpgrade { rx: Some(rx) }) @@ -76,7 +76,7 @@ pub(super) fn pending() -> (Pending, OnUpgrade) { // ===== impl Upgraded ===== impl Upgraded { - #[cfg(any(feature = "http1", test))] + #[cfg(any(feature = "http1", feature = "http2", test))] pub(super) fn new(io: T, read_buf: Bytes) -> Self where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, @@ -187,13 +187,14 @@ impl fmt::Debug for OnUpgrade { // ===== impl Pending ===== -#[cfg(feature = "http1")] +#[cfg(any(feature = "http1", feature = "http2"))] impl Pending { pub(super) fn fulfill(self, upgraded: Upgraded) { trace!("pending upgrade fulfill"); let _ = self.tx.send(Ok(upgraded)); } + #[cfg(feature = "http1")] /// Don't fulfill the pending Upgrade, but instead signal that /// upgrades are handled manually. pub(super) fn manual(self) { diff --git a/tests/client.rs b/tests/client.rs index 978f79a1d1..3eb6dd9015 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -2261,14 +2261,16 @@ mod conn { use std::thread; use std::time::Duration; + use bytes::Buf; use futures_channel::oneshot; use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt}; use futures_util::StreamExt; + use hyper::upgrade::OnUpgrade; use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf}; use tokio::net::{TcpListener as TkTcpListener, TcpStream}; use hyper::client::conn; - use hyper::{self, Body, Method, Request}; + use hyper::{self, Body, Method, Request, Response, StatusCode}; use super::{concat, s, support, tcp_connect, FutureHyperExt}; @@ -2984,6 +2986,125 @@ mod conn { .expect("client should be open"); } + #[tokio::test] + async fn h2_connect() { + let _ = pretty_env_logger::try_init(); + + let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) + .await + .unwrap(); + let addr = listener.local_addr().unwrap(); + + // Spawn an HTTP2 server that asks for bread and responds with baguette. 
+ tokio::spawn(async move { + let sock = listener.accept().await.unwrap().0; + let mut h2 = h2::server::handshake(sock).await.unwrap(); + + let (req, mut respond) = h2.accept().await.unwrap().unwrap(); + tokio::spawn(async move { + poll_fn(|cx| h2.poll_closed(cx)).await.unwrap(); + }); + assert_eq!(req.method(), Method::CONNECT); + + let mut body = req.into_body(); + + let mut send_stream = respond.send_response(Response::default(), false).unwrap(); + + send_stream.send_data("Bread?".into(), true).unwrap(); + + let bytes = body.data().await.unwrap().unwrap(); + assert_eq!(&bytes[..], b"Baguette!"); + let _ = body.flow_control().release_capacity(bytes.len()); + + assert!(body.data().await.is_none()); + }); + + let io = tcp_connect(&addr).await.expect("tcp connect"); + let (mut client, conn) = conn::Builder::new() + .http2_only(true) + .handshake::<_, Body>(io) + .await + .expect("http handshake"); + + tokio::spawn(async move { + conn.await.expect("client conn shouldn't error"); + }); + + let req = Request::connect("localhost") + .body(hyper::Body::empty()) + .unwrap(); + let res = client.send_request(req).await.expect("send_request"); + assert_eq!(res.status(), StatusCode::OK); + + let mut upgraded = hyper::upgrade::on(res).await.unwrap(); + + let mut vec = vec![]; + upgraded.read_to_end(&mut vec).await.unwrap(); + assert_eq!(s(&vec), "Bread?"); + + upgraded.write_all(b"Baguette!").await.unwrap(); + + upgraded.shutdown().await.unwrap(); + } + + #[tokio::test] + async fn h2_connect_rejected() { + let _ = pretty_env_logger::try_init(); + + let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) + .await + .unwrap(); + let addr = listener.local_addr().unwrap(); + let (done_tx, done_rx) = oneshot::channel(); + + tokio::spawn(async move { + let sock = listener.accept().await.unwrap().0; + let mut h2 = h2::server::handshake(sock).await.unwrap(); + + let (req, mut respond) = h2.accept().await.unwrap().unwrap(); + tokio::spawn(async move { + poll_fn(|cx| h2.poll_closed(cx)).await.unwrap(); + }); + assert_eq!(req.method(), Method::CONNECT); + + let res = Response::builder().status(400).body(()).unwrap(); + let mut send_stream = respond.send_response(res, false).unwrap(); + send_stream + .send_data("No bread for you!".into(), true) + .unwrap(); + done_rx.await.unwrap(); + }); + + let io = tcp_connect(&addr).await.expect("tcp connect"); + let (mut client, conn) = conn::Builder::new() + .http2_only(true) + .handshake::<_, Body>(io) + .await + .expect("http handshake"); + + tokio::spawn(async move { + conn.await.expect("client conn shouldn't error"); + }); + + let req = Request::connect("localhost") + .body(hyper::Body::empty()) + .unwrap(); + let res = client.send_request(req).await.expect("send_request"); + assert_eq!(res.status(), StatusCode::BAD_REQUEST); + assert!(res.extensions().get::().is_none()); + + let mut body = String::new(); + hyper::body::aggregate(res.into_body()) + .await + .unwrap() + .reader() + .read_to_string(&mut body) + .unwrap(); + assert_eq!(body, "No bread for you!"); + + done_tx.send(()).unwrap(); + } + async fn drain_til_eof(mut sock: T) -> io::Result<()> { let mut buf = [0u8; 1024]; loop { diff --git a/tests/server.rs b/tests/server.rs index 662e903d57..297b09ac73 100644 --- a/tests/server.rs +++ b/tests/server.rs @@ -13,10 +13,13 @@ use std::task::{Context, Poll}; use std::thread; use std::time::Duration; +use bytes::Bytes; use futures_channel::oneshot; use futures_util::future::{self, Either, FutureExt, TryFutureExt}; #[cfg(feature = "stream")] use 
futures_util::stream::StreamExt as _; +use h2::client::SendRequest; +use h2::{RecvStream, SendStream}; use http::header::{HeaderName, HeaderValue}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf}; use tokio::net::{TcpListener, TcpStream as TkTcpStream}; @@ -1482,6 +1485,339 @@ async fn http_connect_new() { assert_eq!(s(&vec), "bar=foo"); } +#[tokio::test] +async fn h2_connect() { + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + + let _ = pretty_env_logger::try_init(); + let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = listener.local_addr().unwrap(); + let conn = connect_async(addr).await; + + let (h2, connection) = h2::client::handshake(conn).await.unwrap(); + tokio::spawn(async move { + connection.await.unwrap(); + }); + let mut h2 = h2.ready().await.unwrap(); + + async fn connect_and_recv_bread( + h2: &mut SendRequest, + ) -> (RecvStream, SendStream) { + let request = Request::connect("localhost").body(()).unwrap(); + let (response, send_stream) = h2.send_request(request, false).unwrap(); + let response = response.await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let mut body = response.into_body(); + let bytes = body.data().await.unwrap().unwrap(); + assert_eq!(&bytes[..], b"Bread?"); + let _ = body.flow_control().release_capacity(bytes.len()); + + (body, send_stream) + } + + tokio::spawn(async move { + let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await; + + send_stream.send_data("Baguette!".into(), true).unwrap(); + + assert!(recv_stream.data().await.unwrap().unwrap().is_empty()); + }); + + let svc = service_fn(move |req: Request| { + let on_upgrade = hyper::upgrade::on(req); + + tokio::spawn(async move { + let mut upgraded = on_upgrade.await.expect("on_upgrade"); + upgraded.write_all(b"Bread?").await.unwrap(); + + let mut vec = vec![]; + upgraded.read_to_end(&mut vec).await.unwrap(); + assert_eq!(s(&vec), "Baguette!"); + + upgraded.shutdown().await.unwrap(); + }); + + future::ok::<_, hyper::Error>( + Response::builder() + .status(200) + .body(hyper::Body::empty()) + .unwrap(), + ) + }); + + let (socket, _) = listener.accept().await.unwrap(); + Http::new() + .http2_only(true) + .serve_connection(socket, svc) + .with_upgrades() + .await + .unwrap(); +} + +#[tokio::test] +async fn h2_connect_multiplex() { + use futures_util::stream::FuturesUnordered; + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + + let _ = pretty_env_logger::try_init(); + let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = listener.local_addr().unwrap(); + let conn = connect_async(addr).await; + + let (h2, connection) = h2::client::handshake(conn).await.unwrap(); + tokio::spawn(async move { + connection.await.unwrap(); + }); + let mut h2 = h2.ready().await.unwrap(); + + tokio::spawn(async move { + let mut streams = vec![]; + for i in 0..80 { + let request = Request::connect(format!("localhost_{}", i % 4)) + .body(()) + .unwrap(); + let (response, send_stream) = h2.send_request(request, false).unwrap(); + streams.push((i, response, send_stream)); + } + + let futures = streams + .into_iter() + .map(|(i, response, mut send_stream)| async move { + if i % 4 == 0 { + return; + } + + let response = response.await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + if i % 4 == 1 { + return; + } + + let mut body = response.into_body(); + let bytes = body.data().await.unwrap().unwrap(); + assert_eq!(&bytes[..], b"Bread?"); + let _ = body.flow_control().release_capacity(bytes.len()); 
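+                    // i % 4 == 2: stop after reading the greeting and never send a
+                    // body, so the server side of this stream should see its read
+                    // fail with a CANCEL reset (asserted in the service below).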
+ + if i % 4 == 2 { + return; + } + + send_stream.send_data("Baguette!".into(), true).unwrap(); + + assert!(body.data().await.unwrap().unwrap().is_empty()); + }) + .collect::>(); + + futures.for_each(future::ready).await; + }); + + let svc = service_fn(move |req: Request| { + let authority = req.uri().authority().unwrap().to_string(); + let on_upgrade = hyper::upgrade::on(req); + + tokio::spawn(async move { + let upgrade_res = on_upgrade.await; + if authority == "localhost_0" { + assert!(upgrade_res.expect_err("upgrade cancelled").is_canceled()); + return; + } + let mut upgraded = upgrade_res.expect("upgrade successful"); + + upgraded.write_all(b"Bread?").await.unwrap(); + + let mut vec = vec![]; + let read_res = upgraded.read_to_end(&mut vec).await; + + if authority == "localhost_1" || authority == "localhost_2" { + let err = read_res.expect_err("read failed"); + assert_eq!(err.kind(), io::ErrorKind::Other); + assert_eq!( + err.get_ref() + .unwrap() + .downcast_ref::() + .unwrap() + .reason(), + Some(h2::Reason::CANCEL), + ); + return; + } + + read_res.unwrap(); + assert_eq!(s(&vec), "Baguette!"); + + upgraded.shutdown().await.unwrap(); + }); + + future::ok::<_, hyper::Error>( + Response::builder() + .status(200) + .body(hyper::Body::empty()) + .unwrap(), + ) + }); + + let (socket, _) = listener.accept().await.unwrap(); + Http::new() + .http2_only(true) + .serve_connection(socket, svc) + .with_upgrades() + .await + .unwrap(); +} + +#[tokio::test] +async fn h2_connect_large_body() { + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + + let _ = pretty_env_logger::try_init(); + let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = listener.local_addr().unwrap(); + let conn = connect_async(addr).await; + + let (h2, connection) = h2::client::handshake(conn).await.unwrap(); + tokio::spawn(async move { + connection.await.unwrap(); + }); + let mut h2 = h2.ready().await.unwrap(); + + const NO_BREAD: &str = "All work and no bread makes nox a dull boy.\n"; + + async fn connect_and_recv_bread( + h2: &mut SendRequest, + ) -> (RecvStream, SendStream) { + let request = Request::connect("localhost").body(()).unwrap(); + let (response, send_stream) = h2.send_request(request, false).unwrap(); + let response = response.await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let mut body = response.into_body(); + let bytes = body.data().await.unwrap().unwrap(); + assert_eq!(&bytes[..], b"Bread?"); + let _ = body.flow_control().release_capacity(bytes.len()); + + (body, send_stream) + } + + tokio::spawn(async move { + let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await; + + let large_body = Bytes::from(NO_BREAD.repeat(9000)); + + send_stream.send_data(large_body.clone(), false).unwrap(); + send_stream.send_data(large_body, true).unwrap(); + + assert!(recv_stream.data().await.unwrap().unwrap().is_empty()); + }); + + let svc = service_fn(move |req: Request| { + let on_upgrade = hyper::upgrade::on(req); + + tokio::spawn(async move { + let mut upgraded = on_upgrade.await.expect("on_upgrade"); + upgraded.write_all(b"Bread?").await.unwrap(); + + let mut vec = vec![]; + if upgraded.read_to_end(&mut vec).await.is_err() { + return; + } + assert_eq!(vec.len(), NO_BREAD.len() * 9000 * 2); + + upgraded.shutdown().await.unwrap(); + }); + + future::ok::<_, hyper::Error>( + Response::builder() + .status(200) + .body(hyper::Body::empty()) + .unwrap(), + ) + }); + + let (socket, _) = listener.accept().await.unwrap(); + Http::new() + .http2_only(true) + 
.serve_connection(socket, svc) + .with_upgrades() + .await + .unwrap(); +} + +#[tokio::test] +async fn h2_connect_empty_frames() { + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + + let _ = pretty_env_logger::try_init(); + let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = listener.local_addr().unwrap(); + let conn = connect_async(addr).await; + + let (h2, connection) = h2::client::handshake(conn).await.unwrap(); + tokio::spawn(async move { + connection.await.unwrap(); + }); + let mut h2 = h2.ready().await.unwrap(); + + async fn connect_and_recv_bread( + h2: &mut SendRequest, + ) -> (RecvStream, SendStream) { + let request = Request::connect("localhost").body(()).unwrap(); + let (response, send_stream) = h2.send_request(request, false).unwrap(); + let response = response.await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let mut body = response.into_body(); + let bytes = body.data().await.unwrap().unwrap(); + assert_eq!(&bytes[..], b"Bread?"); + let _ = body.flow_control().release_capacity(bytes.len()); + + (body, send_stream) + } + + tokio::spawn(async move { + let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await; + + send_stream.send_data("".into(), false).unwrap(); + send_stream.send_data("".into(), false).unwrap(); + send_stream.send_data("".into(), false).unwrap(); + send_stream.send_data("Baguette!".into(), false).unwrap(); + send_stream.send_data("".into(), true).unwrap(); + + assert!(recv_stream.data().await.unwrap().unwrap().is_empty()); + }); + + let svc = service_fn(move |req: Request| { + let on_upgrade = hyper::upgrade::on(req); + + tokio::spawn(async move { + let mut upgraded = on_upgrade.await.expect("on_upgrade"); + upgraded.write_all(b"Bread?").await.unwrap(); + + let mut vec = vec![]; + upgraded.read_to_end(&mut vec).await.unwrap(); + assert_eq!(s(&vec), "Baguette!"); + + upgraded.shutdown().await.unwrap(); + }); + + future::ok::<_, hyper::Error>( + Response::builder() + .status(200) + .body(hyper::Body::empty()) + .unwrap(), + ) + }); + + let (socket, _) = listener.accept().await.unwrap(); + Http::new() + .http2_only(true) + .serve_connection(socket, svc) + .with_upgrades() + .await + .unwrap(); +} + #[tokio::test] async fn parse_errors_send_4xx_response() { let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); From e61b494e3b6c7bf0247135b0c9ade516126701e9 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 25 May 2021 06:57:02 -0700 Subject: [PATCH 072/420] v0.14.8 --- CHANGELOG.md | 17 +++++++++++++++++ Cargo.toml | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c01ddf91f9..443f275357 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,20 @@ +### v0.14.8 (2021-05-25) + + +#### Features + +* **client:** allow to config http2 max concurrent reset streams (#2535) ([b9916c41](https://github.com/hyperium/hyper/commit/b9916c410182c6225e857f0cded355ea1b74c865)) +* **error:** add `Error::is_parse_too_large` and `Error::is_parse_status` methods (#2538) ([960a69a5](https://github.com/hyperium/hyper/commit/960a69a5878ede82c56f50ac1444a9e75e885a8f)) +* **http2:** + * Implement Client-side CONNECT support over HTTP/2 (#2523) ([5442b6fa](https://github.com/hyperium/hyper/commit/5442b6faddaff9aeb7c073031a3b7aa4497fda4d), closes [#2508](https://github.com/hyperium/hyper/issues/2508)) + * allow HTTP/2 requests by ALPN when http2_only is unset (#2527) 
([be9677a1](https://github.com/hyperium/hyper/commit/be9677a1e782d33c4402772e0fc4ef0a4c49d507)) + + +#### Performance + +* **http2:** reduce amount of adaptive window pings as BDP stabilizes (#2550) ([4cd06bf2](https://github.com/hyperium/hyper/commit/4cd06bf2)) + + ### v0.14.7 (2021-04-22) diff --git a/Cargo.toml b/Cargo.toml index 93624a1ca6..42358c41bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.7" # don't forget to update html_root_url +version = "0.14.8" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" From eb0c64639503bbd4f6e3b1ce3a02bff8eeea7ee8 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 26 May 2021 16:47:36 -0700 Subject: [PATCH 073/420] fix(http1): reduce memory used with flatten write strategy If the write buffer was filled with large bufs from the user, such that it couldn't be fully written to the transport, the write buffer could start to grow significantly as it moved its cursor without shifting over the unwritten bytes. This will now try to shift over the unwritten bytes if the next buf wouldn't fit in the already allocated space. --- src/proto/h1/io.rs | 70 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 8 deletions(-) diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 5cf670991b..48c6d87885 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -56,7 +56,12 @@ where B: Buf, { pub(crate) fn new(io: T) -> Buffered { - let write_buf = WriteBuf::new(&io); + let strategy = if io.is_write_vectored() { + WriteStrategy::Queue + } else { + WriteStrategy::Flatten + }; + let write_buf = WriteBuf::new(strategy); Buffered { flush_pipeline: false, io, @@ -419,6 +424,24 @@ impl> Cursor { } impl Cursor> { + /// If we've advanced the position a bit in this cursor, and wish to + /// extend the underlying vector, we may wish to unshift the "read" bytes + /// off, and move everything else over. + fn maybe_unshift(&mut self, additional: usize) { + if self.pos == 0 { + // nothing to do + return; + } + + if self.bytes.capacity() - self.bytes.len() >= additional { + // there's room! + return; + } + + self.bytes.drain(0..self.pos); + self.pos = 0; + } + fn reset(&mut self) { self.pos = 0; self.bytes.clear(); @@ -463,12 +486,7 @@ pub(super) struct WriteBuf { } impl WriteBuf { - fn new(io: &impl AsyncWrite) -> WriteBuf { - let strategy = if io.is_write_vectored() { - WriteStrategy::Queue - } else { - WriteStrategy::Flatten - }; + fn new(strategy: WriteStrategy) -> WriteBuf { WriteBuf { headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)), max_buf_size: DEFAULT_MAX_BUFFER_SIZE, @@ -492,6 +510,8 @@ where match self.strategy { WriteStrategy::Flatten => { let head = self.headers_mut(); + + head.maybe_unshift(buf.remaining()); //perf: This is a little faster than >::put, //but accomplishes the same result. 
loop { @@ -804,7 +824,6 @@ mod tests { let _ = pretty_env_logger::try_init(); let mock = Mock::new() - // Just a single write .write(b"hello world, it's hyper!") .build(); @@ -820,6 +839,41 @@ mod tests { buffered.flush().await.expect("flush"); } + #[test] + fn write_buf_flatten_partially_flushed() { + let _ = pretty_env_logger::try_init(); + + let b = |s: &str| Cursor::new(s.as_bytes().to_vec()); + + let mut write_buf = WriteBuf::>>::new(WriteStrategy::Flatten); + + write_buf.buffer(b("hello ")); + write_buf.buffer(b("world, ")); + + assert_eq!(write_buf.chunk(), b"hello world, "); + + // advance most of the way, but not all + write_buf.advance(11); + + assert_eq!(write_buf.chunk(), b", "); + assert_eq!(write_buf.headers.pos, 11); + assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE); + + // there's still room in the headers buffer, so just push on the end + write_buf.buffer(b("it's hyper!")); + + assert_eq!(write_buf.chunk(), b", it's hyper!"); + assert_eq!(write_buf.headers.pos, 11); + + let rem1 = write_buf.remaining(); + let cap = write_buf.headers.bytes.capacity(); + + // but when this would go over capacity, don't copy the old bytes + write_buf.buffer(Cursor::new(vec![b'X'; cap])); + assert_eq!(write_buf.remaining(), cap + rem1); + assert_eq!(write_buf.headers.pos, 0); + } + #[tokio::test] async fn write_buf_queue_disable_auto() { let _ = pretty_env_logger::try_init(); From 0d82405a7bf6a812bdfe50885f4684cd9421fc92 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 26 May 2021 12:54:09 -0700 Subject: [PATCH 074/420] refactor(http1): emit trace logs when buffering write data --- src/proto/h1/io.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 48c6d87885..4e6efba680 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -512,6 +512,11 @@ where let head = self.headers_mut(); head.maybe_unshift(buf.remaining()); + trace!( + self.len = head.remaining(), + buf.len = buf.remaining(), + "buffer.flatten" + ); //perf: This is a little faster than >::put, //but accomplishes the same result. loop { @@ -527,6 +532,11 @@ where } } WriteStrategy::Queue => { + trace!( + self.len = self.remaining(), + buf.len = buf.remaining(), + "buffer.queue" + ); self.queue.push(buf.into()); } } From 6a6a24030ec53321ce026fe294c56c86f21e19d4 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 4 Jun 2021 23:57:27 +0200 Subject: [PATCH 075/420] refactor(lib): Switch from pin-project to pin-project-lite (#2566) Note the practical affects of this change: - Dependency count with --features full dropped from 65 to 55. - Time to compile after a clean dropped from 48s to 35s (on a pretty underpowered VM). 
Closes #2388 --- Cargo.toml | 2 +- src/client/conn.rs | 74 +++++++---- src/client/connect/http.rs | 27 ++-- src/client/pool.rs | 25 ++-- src/common/drain.rs | 19 +-- src/common/lazy.rs | 44 ++++--- src/proto/h1/dispatch.rs | 11 +- src/proto/h2/mod.rs | 21 +-- src/proto/h2/server.rs | 76 ++++++----- src/server/accept.rs | 14 +- src/server/conn.rs | 255 +++++++++++++++++++++++-------------- src/server/server.rs | 23 ++-- src/server/shutdown.rs | 43 ++++--- src/server/tcp.rs | 15 ++- src/service/oneshot.rs | 48 ++++--- 15 files changed, 416 insertions(+), 281 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 42358c41bb..be44c3081f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ httparse = "1.4" h2 = { version = "0.3.3", optional = true } itoa = "0.4.1" tracing = { version = "0.1", default-features = false, features = ["std"] } -pin-project = "1.0" +pin-project-lite = "0.2.4" tower-service = "0.3" tokio = { version = "1", features = ["sync"] } want = "0.3" diff --git a/src/client/conn.rs b/src/client/conn.rs index 70c1dad248..c6170007a2 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -48,7 +48,7 @@ use std::error::Error as StdError; use std::fmt; -#[cfg(feature = "http2")] +#[cfg(not(all(feature = "http1", feature = "http2")))] use std::marker::PhantomData; use std::sync::Arc; #[cfg(all(feature = "runtime", feature = "http2"))] @@ -57,12 +57,14 @@ use std::time::Duration; use bytes::Bytes; use futures_util::future::{self, Either, FutureExt as _}; use httparse::ParserConfig; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use tower_service::Service; use super::dispatch; use crate::body::HttpBody; +#[cfg(not(all(feature = "http1", feature = "http2")))] +use crate::common::Never; use crate::common::{ exec::{BoxSendFuture, Exec}, task, Future, Pin, Poll, @@ -74,17 +76,33 @@ use crate::upgrade::Upgraded; use crate::{Body, Request, Response}; #[cfg(feature = "http1")] -type Http1Dispatcher = proto::dispatch::Dispatcher, B, T, R>; +type Http1Dispatcher = + proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; -#[pin_project(project = ProtoClientProj)] -enum ProtoClient -where - B: HttpBody, -{ - #[cfg(feature = "http1")] - H1(#[pin] Http1Dispatcher), - #[cfg(feature = "http2")] - H2(#[pin] proto::h2::ClientTask, PhantomData), +#[cfg(not(feature = "http1"))] +type Http1Dispatcher = (Never, PhantomData<(T, Pin>)>); + +#[cfg(feature = "http2")] +type Http2ClientTask = proto::h2::ClientTask; + +#[cfg(not(feature = "http2"))] +type Http2ClientTask = (Never, PhantomData>>); + +pin_project! { + #[project = ProtoClientProj] + enum ProtoClient + where + B: HttpBody, + { + H1 { + #[pin] + h1: Http1Dispatcher, + }, + H2 { + #[pin] + h2: Http2ClientTask, + }, + } } /// Returns a handshake future over some IO. @@ -405,7 +423,7 @@ where pub fn into_parts(self) -> Parts { match self.inner.expect("already upgraded") { #[cfg(feature = "http1")] - ProtoClient::H1(h1) => { + ProtoClient::H1 { h1 } => { let (io, read_buf, _) = h1.into_inner(); Parts { io, @@ -413,10 +431,12 @@ where _inner: (), } } - #[cfg(feature = "http2")] - ProtoClient::H2(..) => { + ProtoClient::H2 { .. 
} => { panic!("http2 cannot into_inner"); } + + #[cfg(not(feature = "http1"))] + ProtoClient::H1 { h1 } => match h1.0 {}, } } @@ -434,9 +454,14 @@ where pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { match *self.inner.as_mut().expect("already upgraded") { #[cfg(feature = "http1")] - ProtoClient::H1(ref mut h1) => h1.poll_without_shutdown(cx), + ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx), #[cfg(feature = "http2")] - ProtoClient::H2(ref mut h2, _) => Pin::new(h2).poll(cx).map_ok(|_| ()), + ProtoClient::H2 { ref mut h2, .. } => Pin::new(h2).poll(cx).map_ok(|_| ()), + + #[cfg(not(feature = "http1"))] + ProtoClient::H1 { ref mut h1 } => match h1.0 {}, + #[cfg(not(feature = "http2"))] + ProtoClient::H2 { ref mut h2, .. } => match h2.0 {}, } } @@ -465,7 +490,7 @@ where proto::Dispatched::Shutdown => Poll::Ready(Ok(())), #[cfg(feature = "http1")] proto::Dispatched::Upgrade(pending) => match self.inner.take() { - Some(ProtoClient::H1(h1)) => { + Some(ProtoClient::H1 { h1 }) => { let (io, buf, _) = h1.into_inner(); pending.fulfill(Upgraded::new(io, buf)); Poll::Ready(Ok(())) @@ -756,14 +781,14 @@ impl Builder { } let cd = proto::h1::dispatch::Client::new(rx); let dispatch = proto::h1::Dispatcher::new(cd, conn); - ProtoClient::H1(dispatch) + ProtoClient::H1 { h1: dispatch } } #[cfg(feature = "http2")] Proto::Http2 => { let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone()) .await?; - ProtoClient::H2(h2, PhantomData) + ProtoClient::H2 { h2 } } }; @@ -817,9 +842,14 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project() { #[cfg(feature = "http1")] - ProtoClientProj::H1(c) => c.poll(cx), + ProtoClientProj::H1 { h1 } => h1.poll(cx), #[cfg(feature = "http2")] - ProtoClientProj::H2(c, _) => c.poll(cx), + ProtoClientProj::H2 { h2, .. } => h2.poll(cx), + + #[cfg(not(feature = "http1"))] + ProtoClientProj::H1 { h1 } => match h1.0 {}, + #[cfg(not(feature = "http2"))] + ProtoClientProj::H2 { h2, .. } => match h2.0 {}, } } } diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs index 4437c86380..0f1a487adb 100644 --- a/src/client/connect/http.rs +++ b/src/client/connect/http.rs @@ -11,7 +11,7 @@ use std::time::Duration; use futures_util::future::Either; use http::uri::{Scheme, Uri}; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::net::{TcpSocket, TcpStream}; use tokio::time::Sleep; @@ -373,18 +373,19 @@ impl HttpInfo { } } -// Not publicly exported (so missing_docs doesn't trigger). -// -// We return this `Future` instead of the `Pin>` directly -// so that users don't rely on it fitting in a `Pin>` slot -// (and thus we can change the type in the future). -#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[allow(missing_debug_implementations)] -pub struct HttpConnecting { - #[pin] - fut: BoxConnecting, - _marker: PhantomData, +pin_project! { + // Not publicly exported (so missing_docs doesn't trigger). + // + // We return this `Future` instead of the `Pin>` directly + // so that users don't rely on it fitting in a `Pin>` slot + // (and thus we can change the type in the future). 
+ #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct HttpConnecting { + #[pin] + fut: BoxConnecting, + _marker: PhantomData, + } } type ConnectResult = Result; diff --git a/src/client/pool.rs b/src/client/pool.rs index 0f22657bd4..94f73f6afd 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -11,7 +11,7 @@ use futures_channel::oneshot; use tokio::time::{Duration, Instant, Interval}; use super::client::Ver; -use crate::common::{task, exec::Exec, Future, Pin, Poll, Unpin}; +use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin}; // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] @@ -714,16 +714,17 @@ impl Expiration { } #[cfg(feature = "runtime")] -#[pin_project::pin_project] -struct IdleTask { - #[pin] - interval: Interval, - pool: WeakOpt>>, - // This allows the IdleTask to be notified as soon as the entire - // Pool is fully dropped, and shutdown. This channel is never sent on, - // but Err(Canceled) will be received when the Pool is dropped. - #[pin] - pool_drop_notifier: oneshot::Receiver, +pin_project_lite::pin_project! { + struct IdleTask { + #[pin] + interval: Interval, + pool: WeakOpt>>, + // This allows the IdleTask to be notified as soon as the entire + // Pool is fully dropped, and shutdown. This channel is never sent on, + // but Err(Canceled) will be received when the Pool is dropped. + #[pin] + pool_drop_notifier: oneshot::Receiver, + } } #[cfg(feature = "runtime")] @@ -776,7 +777,7 @@ mod tests { use std::time::Duration; use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; - use crate::common::{task, exec::Exec, Future, Pin}; + use crate::common::{exec::Exec, task, Future, Pin}; /// Test unique reservations. #[derive(Debug, PartialEq, Eq)] diff --git a/src/common/drain.rs b/src/common/drain.rs index 4bb2ecc118..174da876df 100644 --- a/src/common/drain.rs +++ b/src/common/drain.rs @@ -1,6 +1,6 @@ use std::mem; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::sync::watch; use super::{task, Future, Pin, Poll}; @@ -21,14 +21,15 @@ pub(crate) struct Watch { rx: watch::Receiver<()>, } -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct Watching { - #[pin] - future: F, - state: State, - watch: Pin + Send + Sync>>, - _rx: watch::Receiver<()>, +pin_project! { + #[allow(missing_debug_implementations)] + pub struct Watching { + #[pin] + future: F, + state: State, + watch: Pin + Send + Sync>>, + _rx: watch::Receiver<()>, + } } enum State { diff --git a/src/common/lazy.rs b/src/common/lazy.rs index 6bf87c4355..2722077303 100644 --- a/src/common/lazy.rs +++ b/src/common/lazy.rs @@ -1,4 +1,4 @@ -use pin_project::pin_project; +use pin_project_lite::pin_project; use super::{task, Future, Pin, Poll}; @@ -12,23 +12,27 @@ where R: Future + Unpin, { Lazy { - inner: Inner::Init(func), + inner: Inner::Init { func }, } } // FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -#[pin_project] -pub(crate) struct Lazy { - #[pin] - inner: Inner, +pin_project! { + #[allow(missing_debug_implementations)] + pub(crate) struct Lazy { + #[pin] + inner: Inner, + } } -#[pin_project(project = InnerProj, project_replace = InnerProjReplace)] -enum Inner { - Init(F), - Fut(#[pin] R), - Empty, +pin_project! 
{ + #[project = InnerProj] + #[project_replace = InnerProjReplace] + enum Inner { + Init { func: F }, + Fut { #[pin] fut: R }, + Empty, + } } impl Started for Lazy @@ -38,8 +42,8 @@ where { fn started(&self) -> bool { match self.inner { - Inner::Init(_) => false, - Inner::Fut(_) | Inner::Empty => true, + Inner::Init { .. } => false, + Inner::Fut { .. } | Inner::Empty => true, } } } @@ -54,15 +58,15 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let mut this = self.project(); - if let InnerProj::Fut(f) = this.inner.as_mut().project() { - return f.poll(cx); + if let InnerProj::Fut { fut } = this.inner.as_mut().project() { + return fut.poll(cx); } match this.inner.as_mut().project_replace(Inner::Empty) { - InnerProjReplace::Init(func) => { - this.inner.set(Inner::Fut(func())); - if let InnerProj::Fut(f) = this.inner.project() { - return f.poll(cx); + InnerProjReplace::Init { func } => { + this.inner.set(Inner::Fut { fut: func() }); + if let InnerProj::Fut { fut } = this.inner.project() { + return fut.poll(cx); } unreachable!() } diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index 88e641e9a4..1a72450b15 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -44,10 +44,13 @@ cfg_server! { } cfg_client! { - pub(crate) struct Client { - callback: Option, http::Response>>, - rx: ClientRx, - rx_closed: bool, + pin_project_lite::pin_project! { + pub(crate) struct Client { + callback: Option, http::Response>>, + #[pin] + rx: ClientRx, + rx_closed: bool, + } } type ClientRx = crate::client::dispatch::Receiver, http::Response>; diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index 0dbcc8d466..b410bab60c 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -5,7 +5,7 @@ use http::header::{ TRANSFER_ENCODING, UPGRADE, }; use http::HeaderMap; -use pin_project::pin_project; +use pin_project_lite::pin_project; use std::error::Error as StdError; use std::io::{self, Cursor, IoSlice}; use std::mem; @@ -88,15 +88,16 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { // body adapters used by both Client and Server -#[pin_project] -struct PipeToSendStream -where - S: HttpBody, -{ - body_tx: SendStream>, - data_done: bool, - #[pin] - stream: S, +pin_project! { + struct PipeToSendStream + where + S: HttpBody, + { + body_tx: SendStream>, + data_done: bool, + #[pin] + stream: S, + } } impl PipeToSendStream diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index de77eaa232..1222663dda 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -7,7 +7,7 @@ use bytes::Bytes; use h2::server::{Connection, Handshake, SendResponse}; use h2::{Reason, RecvStream}; use http::{Method, Request}; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::{ping, PipeToSendStream, SendBuf}; @@ -62,15 +62,16 @@ impl Default for Config { } } -#[pin_project] -pub(crate) struct Server -where - S: HttpService, - B: HttpBody, -{ - exec: E, - service: S, - state: State, +pin_project! { + pub(crate) struct Server + where + S: HttpService, + B: HttpBody, + { + exec: E, + service: S, + state: State, + } } enum State @@ -348,24 +349,34 @@ where } } -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct H2Stream -where - B: HttpBody, -{ - reply: SendResponse>, - #[pin] - state: H2StreamState, +pin_project! 
{ + #[allow(missing_debug_implementations)] + pub struct H2Stream + where + B: HttpBody, + { + reply: SendResponse>, + #[pin] + state: H2StreamState, + } } -#[pin_project(project = H2StreamStateProj)] -enum H2StreamState -where - B: HttpBody, -{ - Service(#[pin] F, Option), - Body(#[pin] PipeToSendStream), +pin_project! { + #[project = H2StreamStateProj] + enum H2StreamState + where + B: HttpBody, + { + Service { + #[pin] + fut: F, + connect_parts: Option, + }, + Body { + #[pin] + pipe: PipeToSendStream, + }, + } } struct ConnectParts { @@ -385,7 +396,7 @@ where ) -> H2Stream { H2Stream { reply: respond, - state: H2StreamState::Service(fut, connect_parts), + state: H2StreamState::Service { fut, connect_parts }, } } } @@ -415,7 +426,10 @@ where let mut me = self.project(); loop { let next = match me.state.as_mut().project() { - H2StreamStateProj::Service(h, connect_parts) => { + H2StreamStateProj::Service { + fut: h, + connect_parts, + } => { let res = match h.poll(cx) { Poll::Ready(Ok(r)) => r, Poll::Pending => { @@ -476,13 +490,15 @@ where if !body.is_end_stream() { let body_tx = reply!(me, res, false); - H2StreamState::Body(PipeToSendStream::new(body, body_tx)) + H2StreamState::Body { + pipe: PipeToSendStream::new(body, body_tx), + } } else { reply!(me, res, true); return Poll::Ready(Ok(())); } } - H2StreamStateProj::Body(pipe) => { + H2StreamStateProj::Body { pipe } => { return pipe.poll(cx); } }; diff --git a/src/server/accept.rs b/src/server/accept.rs index 4ec287129d..4b7a1487dd 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -9,7 +9,7 @@ #[cfg(feature = "stream")] use futures_core::Stream; #[cfg(feature = "stream")] -use pin_project::pin_project; +use pin_project_lite::pin_project; use crate::common::{ task::{self, Poll}, @@ -86,8 +86,12 @@ pub fn from_stream(stream: S) -> impl Accept where S: Stream>, { - #[pin_project] - struct FromStream(#[pin] S); + pin_project! { + struct FromStream { + #[pin] + stream: S, + } + } impl Accept for FromStream where @@ -99,9 +103,9 @@ where self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll>> { - self.project().0.poll_next(cx) + self.project().stream.poll_next(cx) } } - FromStream(stream) + FromStream { stream } } diff --git a/src/server/conn.rs b/src/server/conn.rs index c1a52f1c0d..085f890139 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -45,7 +45,7 @@ use std::error::Error as StdError; use std::fmt; -#[cfg(feature = "http1")] +#[cfg(not(all(feature = "http1", feature = "http2")))] use std::marker::PhantomData; #[cfg(feature = "tcp")] use std::net::SocketAddr; @@ -53,7 +53,7 @@ use std::net::SocketAddr; use std::time::Duration; use bytes::Bytes; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::accept::Accept; @@ -61,6 +61,8 @@ use crate::body::{Body, HttpBody}; use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; #[cfg(feature = "http2")] use crate::common::io::Rewind; +#[cfg(not(all(feature = "http1", feature = "http2")))] +use crate::common::Never; use crate::common::{task, Future, Pin, Poll, Unpin}; #[cfg(all(feature = "http1", feature = "http2"))] use crate::error::{Kind, Parse}; @@ -111,77 +113,93 @@ enum ConnectionMode { Fallback, } -/// A stream mapping incoming IOs to new services. -/// -/// Yields `Connecting`s that are futures that should be put on a reactor. 
-#[must_use = "streams do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub(super) struct Serve { - #[pin] - incoming: I, - make_service: S, - protocol: Http, +pin_project! { + /// A stream mapping incoming IOs to new services. + /// + /// Yields `Connecting`s that are futures that should be put on a reactor. + #[must_use = "streams do nothing unless polled"] + #[derive(Debug)] + pub(super) struct Serve { + #[pin] + incoming: I, + make_service: S, + protocol: Http, + } } -/// A future building a new `Service` to a `Connection`. -/// -/// Wraps the future returned from `MakeService` into one that returns -/// a `Connection`. -#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub struct Connecting { - #[pin] - future: F, - io: Option, - protocol: Http, +pin_project! { + /// A future building a new `Service` to a `Connection`. + /// + /// Wraps the future returned from `MakeService` into one that returns + /// a `Connection`. + #[must_use = "futures do nothing unless polled"] + #[derive(Debug)] + pub struct Connecting { + #[pin] + future: F, + io: Option, + protocol: Http, + } } -#[must_use = "futures do nothing unless polled"] -#[pin_project] -#[derive(Debug)] -pub(super) struct SpawnAll { - // TODO: re-add `pub(super)` once rustdoc can handle this. - // - // See https://github.com/rust-lang/rust/issues/64705 - #[pin] - pub(super) serve: Serve, +pin_project! { + #[must_use = "futures do nothing unless polled"] + #[derive(Debug)] + pub(super) struct SpawnAll { + // TODO: re-add `pub(super)` once rustdoc can handle this. + // + // See https://github.com/rust-lang/rust/issues/64705 + #[pin] + pub(super) serve: Serve, + } } -/// A future binding a connection with a Service. -/// -/// Polling this future will drive HTTP forward. -#[must_use = "futures do nothing unless polled"] -#[pin_project] -pub struct Connection -where - S: HttpService, -{ - pub(super) conn: Option>, - #[cfg(all(feature = "http1", feature = "http2"))] - fallback: Fallback, +pin_project! { + /// A future binding a connection with a Service. + /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + pub struct Connection + where + S: HttpService, + { + pub(super) conn: Option>, + fallback: Fallback, + } } -#[pin_project(project = ProtoServerProj)] -pub(super) enum ProtoServer -where - S: HttpService, - B: HttpBody, -{ - #[cfg(feature = "http1")] - H1( - #[pin] - proto::h1::Dispatcher< - proto::h1::dispatch::Server, - B, - T, - proto::ServerTransaction, - >, - PhantomData, - ), - #[cfg(feature = "http2")] - H2(#[pin] proto::h2::Server, S, B, E>), +#[cfg(feature = "http1")] +type Http1Dispatcher = + proto::h1::Dispatcher, B, T, proto::ServerTransaction>; + +#[cfg(not(feature = "http1"))] +type Http1Dispatcher = (Never, PhantomData<(T, Box>, Box>)>); + +#[cfg(feature = "http2")] +type Http2Server = proto::h2::Server, S, B, E>; + +#[cfg(not(feature = "http2"))] +type Http2Server = ( + Never, + PhantomData<(T, Box>, Box>, Box>)>, +); + +pin_project! 
{ + #[project = ProtoServerProj] + pub(super) enum ProtoServer + where + S: HttpService, + B: HttpBody, + { + H1 { + #[pin] + h1: Http1Dispatcher, + }, + H2 { + #[pin] + h2: Http2Server, + }, + } } #[cfg(all(feature = "http1", feature = "http2"))] @@ -191,6 +209,9 @@ enum Fallback { Http1Only, } +#[cfg(not(all(feature = "http1", feature = "http2")))] +type Fallback = PhantomData; + #[cfg(all(feature = "http1", feature = "http2"))] impl Fallback { fn to_h2(&self) -> bool { @@ -557,7 +578,9 @@ impl Http { conn.set_max_buf_size(max); } let sd = proto::h1::dispatch::Server::new(service); - ProtoServer::H1(proto::h1::Dispatcher::new(sd, conn), PhantomData) + ProtoServer::H1 { + h1: proto::h1::Dispatcher::new(sd, conn), + } }}; } @@ -573,19 +596,20 @@ impl Http { let rewind_io = Rewind::new(io); let h2 = proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone()); - ProtoServer::H2(h2) + ProtoServer::H2 { h2 } } }; Connection { conn: Some(proto), - #[cfg(feature = "http1")] - #[cfg(feature = "http2")] + #[cfg(all(feature = "http1", feature = "http2"))] fallback: if self.mode == ConnectionMode::Fallback { Fallback::ToHttp2(self.h2_builder.clone(), self.exec.clone()) } else { Fallback::Http1Only }, + #[cfg(not(all(feature = "http1", feature = "http2")))] + fallback: PhantomData, } } @@ -628,17 +652,22 @@ where /// This should only be called while the `Connection` future is still /// pending. If called after `Connection::poll` has resolved, this does /// nothing. - pub fn graceful_shutdown(self: Pin<&mut Self>) { - match self.project().conn { + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + match self.conn { #[cfg(feature = "http1")] - Some(ProtoServer::H1(ref mut h1, _)) => { + Some(ProtoServer::H1 { ref mut h1, .. }) => { h1.disable_keep_alive(); } #[cfg(feature = "http2")] - Some(ProtoServer::H2(ref mut h2)) => { + Some(ProtoServer::H2 { ref mut h2 }) => { h2.graceful_shutdown(); } None => (), + + #[cfg(not(feature = "http1"))] + Some(ProtoServer::H1 { ref mut h1, .. }) => match h1.0 {}, + #[cfg(not(feature = "http2"))] + Some(ProtoServer::H2 { ref mut h2 }) => match h2.0 {}, } } @@ -662,7 +691,7 @@ where pub fn try_into_parts(self) -> Option> { match self.conn.unwrap() { #[cfg(feature = "http1")] - ProtoServer::H1(h1, _) => { + ProtoServer::H1 { h1, .. } => { let (io, read_buf, dispatch) = h1.into_inner(); Some(Parts { io, @@ -671,8 +700,10 @@ where _inner: (), }) } - #[cfg(feature = "http2")] - ProtoServer::H2(_h2) => None, + ProtoServer::H2 { .. } => None, + + #[cfg(not(feature = "http1"))] + ProtoServer::H1 { h1, .. } => match h1.0 {}, } } @@ -696,7 +727,7 @@ where loop { match *self.conn.as_mut().unwrap() { #[cfg(feature = "http1")] - ProtoServer::H1(ref mut h1, _) => match ready!(h1.poll_without_shutdown(cx)) { + ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) { Ok(()) => return Poll::Ready(Ok(())), Err(e) => { #[cfg(feature = "http2")] @@ -712,7 +743,12 @@ where } }, #[cfg(feature = "http2")] - ProtoServer::H2(ref mut h2) => return Pin::new(h2).poll(cx).map_ok(|_| ()), + ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()), + + #[cfg(not(feature = "http1"))] + ProtoServer::H1 { ref mut h1, .. 
} => match h1.0 {}, + #[cfg(not(feature = "http2"))] + ProtoServer::H2 { ref mut h2 } => match h2.0 {}, }; } } @@ -738,8 +774,8 @@ where let conn = self.conn.take(); let (io, read_buf, dispatch) = match conn.unwrap() { - ProtoServer::H1(h1, _) => h1.into_inner(), - ProtoServer::H2(_h2) => { + ProtoServer::H1 { h1, .. } => h1.into_inner(), + ProtoServer::H2 { .. } => { panic!("h2 cannot into_inner"); } }; @@ -752,7 +788,7 @@ where let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone()); debug_assert!(self.conn.is_none()); - self.conn = Some(ProtoServer::H2(h2)); + self.conn = Some(ProtoServer::H2 { h2 }); } /// Enable this connection to support higher-level HTTP upgrades. @@ -986,9 +1022,14 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project() { #[cfg(feature = "http1")] - ProtoServerProj::H1(s, _) => s.poll(cx), + ProtoServerProj::H1 { h1, .. } => h1.poll(cx), #[cfg(feature = "http2")] - ProtoServerProj::H2(s) => s.poll(cx), + ProtoServerProj::H2 { h2 } => h2.poll(cx), + + #[cfg(not(feature = "http1"))] + ProtoServerProj::H1 { h1, .. } => match h1.0 {}, + #[cfg(not(feature = "http2"))] + ProtoServerProj::H2 { h2 } => match h2.0 {}, } } } @@ -1002,7 +1043,7 @@ pub(crate) mod spawn_all { use crate::common::exec::ConnStreamExec; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::HttpService; - use pin_project::pin_project; + use pin_project_lite::pin_project; // Used by `SpawnAll` to optionally watch a `Connection` future. // @@ -1047,23 +1088,36 @@ pub(crate) mod spawn_all { // Users cannot import this type, nor the associated `NewSvcExec`. Instead, // a blanket implementation for `Executor` is sufficient. - #[pin_project] - #[allow(missing_debug_implementations)] - pub struct NewSvcTask, E, W: Watcher> { - #[pin] - state: State, + pin_project! { + #[allow(missing_debug_implementations)] + pub struct NewSvcTask, E, W: Watcher> { + #[pin] + state: State, + } } - #[pin_project(project = StateProj)] - pub(super) enum State, E, W: Watcher> { - Connecting(#[pin] Connecting, W), - Connected(#[pin] W::Future), + pin_project! { + #[project = StateProj] + pub(super) enum State, E, W: Watcher> { + Connecting { + #[pin] + connecting: Connecting, + watcher: W, + }, + Connected { + #[pin] + future: W::Future, + }, + } } impl, E, W: Watcher> NewSvcTask { pub(super) fn new(connecting: Connecting, watcher: W) -> Self { NewSvcTask { - state: State::Connecting(connecting, watcher), + state: State::Connecting { + connecting, + watcher, + }, } } } @@ -1090,7 +1144,10 @@ pub(crate) mod spawn_all { loop { let next = { match me.state.as_mut().project() { - StateProj::Connecting(connecting, watcher) => { + StateProj::Connecting { + connecting, + watcher, + } => { let res = ready!(connecting.poll(cx)); let conn = match res { Ok(conn) => conn, @@ -1100,10 +1157,10 @@ pub(crate) mod spawn_all { return Poll::Ready(()); } }; - let connected = watcher.watch(conn.with_upgrades()); - State::Connected(connected) + let future = watcher.watch(conn.with_upgrades()); + State::Connected { future } } - StateProj::Connected(future) => { + StateProj::Connected { future } => { return future.poll(cx).map(|res| { if let Err(err) = res { debug!("connection error: {}", err); @@ -1171,7 +1228,7 @@ mod upgrades { #[cfg(feature = "http1")] Ok(proto::Dispatched::Upgrade(pending)) => { match self.inner.conn.take() { - Some(ProtoServer::H1(h1, _)) => { + Some(ProtoServer::H1 { h1, .. 
}) => { let (io, buf, _) = h1.into_inner(); pending.fulfill(Upgraded::new(io, buf)); return Poll::Ready(Ok(())); diff --git a/src/server/server.rs b/src/server/server.rs index 20c993e8ac..bdd517808b 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -6,7 +6,7 @@ use std::net::{SocketAddr, TcpListener as StdTcpListener}; #[cfg(feature = "tcp")] use std::time::Duration; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use super::accept::Accept; @@ -21,16 +21,17 @@ use super::shutdown::{Graceful, GracefulWatcher}; #[cfg(feature = "tcp")] use super::tcp::AddrIncoming; -/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. -/// -/// `Server` is a `Future` mapping a bound listener with a set of service -/// handlers. It is built using the [`Builder`](Builder), and the future -/// completes when the server has been shutdown. It should be run by an -/// `Executor`. -#[pin_project] -pub struct Server { - #[pin] - spawn_all: SpawnAll, +pin_project! { + /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. + /// + /// `Server` is a `Future` mapping a bound listener with a set of service + /// handlers. It is built using the [`Builder`](Builder), and the future + /// completes when the server has been shutdown. It should be run by an + /// `Executor`. + pub struct Server { + #[pin] + spawn_all: SpawnAll, + } } /// A builder for a [`Server`](Server). diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs index e54ba42104..122853ac17 100644 --- a/src/server/shutdown.rs +++ b/src/server/shutdown.rs @@ -1,33 +1,36 @@ use std::error::Error as StdError; -use pin_project::pin_project; +use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; -use super::conn::{SpawnAll, UpgradeableConnection, Watcher}; use super::accept::Accept; +use super::conn::{SpawnAll, UpgradeableConnection, Watcher}; use crate::body::{Body, HttpBody}; use crate::common::drain::{self, Draining, Signal, Watch, Watching}; use crate::common::exec::{ConnStreamExec, NewSvcExec}; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::{HttpService, MakeServiceRef}; -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct Graceful { - #[pin] - state: State, +pin_project! { + #[allow(missing_debug_implementations)] + pub struct Graceful { + #[pin] + state: State, + } } -#[pin_project(project = StateProj)] -pub(super) enum State { - Running { - drain: Option<(Signal, Watch)>, - #[pin] - spawn_all: SpawnAll, - #[pin] - signal: F, - }, - Draining(Draining), +pin_project! 
{ + #[project = StateProj] + pub(super) enum State { + Running { + drain: Option<(Signal, Watch)>, + #[pin] + spawn_all: SpawnAll, + #[pin] + signal: F, + }, + Draining { draining: Draining }, + } } impl Graceful { @@ -71,14 +74,16 @@ where Poll::Ready(()) => { debug!("signal received, starting graceful shutdown"); let sig = drain.take().expect("drain channel").0; - State::Draining(sig.drain()) + State::Draining { + draining: sig.drain(), + } } Poll::Pending => { let watch = drain.as_ref().expect("drain channel").1.clone(); return spawn_all.poll_watch(cx, &GracefulWatcher(watch)); } }, - StateProj::Draining(ref mut draining) => { + StateProj::Draining { ref mut draining } => { return Pin::new(draining).poll(cx).map(Ok); } } diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 7b2f68b3a9..792e0034f3 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -199,13 +199,14 @@ mod addr_stream { use crate::common::{task, Pin, Poll}; - /// A transport returned yieled by `AddrIncoming`. - #[pin_project::pin_project] - #[derive(Debug)] - pub struct AddrStream { - #[pin] - inner: TcpStream, - pub(super) remote_addr: SocketAddr, + pin_project_lite::pin_project! { + /// A transport returned yieled by `AddrIncoming`. + #[derive(Debug)] + pub struct AddrStream { + #[pin] + inner: TcpStream, + pub(super) remote_addr: SocketAddr, + } } impl AddrStream { diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs index 766d0c4689..2697af8f4c 100644 --- a/src/service/oneshot.rs +++ b/src/service/oneshot.rs @@ -1,6 +1,6 @@ // TODO: Eventually to be replaced with tower_util::Oneshot. -use pin_project::pin_project; +use pin_project_lite::pin_project; use tower_service::Service; use crate::common::{task, Future, Pin, Poll}; @@ -10,25 +10,35 @@ where S: Service, { Oneshot { - state: State::NotReady(svc, req), + state: State::NotReady { svc, req }, } } -// A `Future` consuming a `Service` and request, waiting until the `Service` -// is ready, and then calling `Service::call` with the request, and -// waiting for that `Future`. -#[allow(missing_debug_implementations)] -#[pin_project] -pub struct Oneshot, Req> { - #[pin] - state: State, +pin_project! { + // A `Future` consuming a `Service` and request, waiting until the `Service` + // is ready, and then calling `Service::call` with the request, and + // waiting for that `Future`. + #[allow(missing_debug_implementations)] + pub struct Oneshot, Req> { + #[pin] + state: State, + } } -#[pin_project(project = StateProj, project_replace = StateProjOwn)] -enum State, Req> { - NotReady(S, Req), - Called(#[pin] S::Future), - Tmp, +pin_project! { + #[project = StateProj] + #[project_replace = StateProjOwn] + enum State, Req> { + NotReady { + svc: S, + req: Req, + }, + Called { + #[pin] + fut: S::Future, + }, + Tmp, + } } impl Future for Oneshot @@ -42,19 +52,19 @@ where loop { match me.state.as_mut().project() { - StateProj::NotReady(ref mut svc, _) => { + StateProj::NotReady { ref mut svc, .. 
} => { ready!(svc.poll_ready(cx))?; // fallthrough out of the match's borrow } - StateProj::Called(fut) => { + StateProj::Called { fut } => { return fut.poll(cx); } StateProj::Tmp => unreachable!(), } match me.state.as_mut().project_replace(State::Tmp) { - StateProjOwn::NotReady(mut svc, req) => { - me.state.set(State::Called(svc.call(req))); + StateProjOwn::NotReady { mut svc, req } => { + me.state.set(State::Called { fut: svc.call(req) }); } _ => unreachable!(), } From 55d9a584b19a553b105842f6bf954edf46a007ee Mon Sep 17 00:00:00 2001 From: bensadiku <43443348+bensadiku@users.noreply.github.com> Date: Sat, 5 Jun 2021 00:17:37 +0200 Subject: [PATCH 076/420] refactor(http1): return Parse::Internal error if there's an illegal header name or value (#2544) --- src/error.rs | 5 +++++ src/proto/h1/role.rs | 42 +++++++++++++++--------------------------- 2 files changed, 20 insertions(+), 27 deletions(-) diff --git a/src/error.rs b/src/error.rs index dd577b99a6..c0d9622d50 100644 --- a/src/error.rs +++ b/src/error.rs @@ -73,6 +73,8 @@ pub(super) enum Parse { Header, TooLarge, Status, + #[cfg_attr(debug_assertions, allow(unused))] + Internal, } #[derive(Debug, PartialEq)] @@ -374,6 +376,9 @@ impl Error { Kind::Parse(Parse::Header) => "invalid HTTP header parsed", Kind::Parse(Parse::TooLarge) => "message head is too large", Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed", + Kind::Parse(Parse::Internal) => { + "internal error inside Hyper and/or its dependencies, please report" + } Kind::IncompleteMessage => "connection closed before message completed", #[cfg(feature = "http1")] Kind::UnexpectedMessage => "received unexpected message from connection", diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 84dc091147..0d59edf2d6 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -8,9 +8,9 @@ use std::mem; #[cfg(any(test, feature = "server", feature = "ffi"))] use bytes::Bytes; use bytes::BytesMut; -use http::header::{self, Entry, HeaderName, HeaderValue}; #[cfg(feature = "server")] use http::header::ValueIter; +use http::header::{self, Entry, HeaderName, HeaderValue}; use http::{HeaderMap, Method, StatusCode, Version}; use crate::body::DecodedLength; @@ -29,22 +29,10 @@ const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific macro_rules! header_name { ($bytes:expr) => {{ - #[cfg(debug_assertions)] { match HeaderName::from_bytes($bytes) { Ok(name) => name, - Err(_) => panic!( - "illegal header name from httparse: {:?}", - ::bytes::Bytes::copy_from_slice($bytes) - ), - } - } - - #[cfg(not(debug_assertions))] - { - match HeaderName::from_bytes($bytes) { - Ok(name) => name, - Err(_) => panic!("illegal header name from httparse: {:?}", $bytes), + Err(e) => maybe_panic!(e), } } }}; @@ -52,23 +40,24 @@ macro_rules! header_name { macro_rules! header_value { ($bytes:expr) => {{ - #[cfg(debug_assertions)] { - let __hvb: ::bytes::Bytes = $bytes; - match HeaderValue::from_maybe_shared(__hvb.clone()) { - Ok(name) => name, - Err(_) => panic!("illegal header value from httparse: {:?}", __hvb), - } - } - - #[cfg(not(debug_assertions))] - { - // Unsafe: httparse already validated header value unsafe { HeaderValue::from_maybe_shared_unchecked($bytes) } } }}; } +macro_rules! 
maybe_panic { + ($($arg:tt)*) => ({ + let _err = ($($arg)*); + if cfg!(debug_assertions) { + panic!("{:?}", _err); + } else { + error!("Internal Hyper error, please report {:?}", _err); + return Err(Parse::Internal) + } + }) +} + pub(super) fn parse_headers( bytes: &mut BytesMut, ctx: ParseContext<'_>, @@ -891,8 +880,7 @@ impl Http1Transaction for Client { ); let mut res = httparse::Response::new(&mut headers); let bytes = buf.as_ref(); - match ctx.h1_parser_config.parse_response(&mut res, bytes) - { + match ctx.h1_parser_config.parse_response(&mut res, bytes) { Ok(httparse::Status::Complete(len)) => { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; From 8b71a67413476425097827016fc515049ec0fae4 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 7 Jun 2021 12:31:29 -0700 Subject: [PATCH 077/420] v0.14.9 --- CHANGELOG.md | 10 +++++++++- Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 443f275357..bcf18b3895 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +### v0.14.9 (2021-06-07) + + +#### Bug Fixes + +* **http1:** reduce memory used with flatten write strategy ([eb0c6463](https://github.com/hyperium/hyper/commit/eb0c64639503bbd4f6e3b1ce3a02bff8eeea7ee8)) + + ### v0.14.8 (2021-05-25) @@ -6,7 +14,7 @@ * **client:** allow to config http2 max concurrent reset streams (#2535) ([b9916c41](https://github.com/hyperium/hyper/commit/b9916c410182c6225e857f0cded355ea1b74c865)) * **error:** add `Error::is_parse_too_large` and `Error::is_parse_status` methods (#2538) ([960a69a5](https://github.com/hyperium/hyper/commit/960a69a5878ede82c56f50ac1444a9e75e885a8f)) * **http2:** - * Implement Client-side CONNECT support over HTTP/2 (#2523) ([5442b6fa](https://github.com/hyperium/hyper/commit/5442b6faddaff9aeb7c073031a3b7aa4497fda4d), closes [#2508](https://github.com/hyperium/hyper/issues/2508)) + * Implement Client and Server CONNECT support over HTTP/2 (#2523) ([5442b6fa](https://github.com/hyperium/hyper/commit/5442b6faddaff9aeb7c073031a3b7aa4497fda4d), closes [#2508](https://github.com/hyperium/hyper/issues/2508)) * allow HTTP/2 requests by ALPN when http2_only is unset (#2527) ([be9677a1](https://github.com/hyperium/hyper/commit/be9677a1e782d33c4402772e0fc4ef0a4c49d507)) diff --git a/Cargo.toml b/Cargo.toml index be44c3081f..688b209ad3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.8" # don't forget to update html_root_url +version = "0.14.9" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" From ea8b0cd86efe7543d9f6d7d368b2596c7341fa76 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 7 Jun 2021 15:50:49 -0700 Subject: [PATCH 078/420] refactor(error): remove PartialEq derives for error kind enums Replaced the comparisons with `matches!`. This should reduce a bit of code generation that isn't really needed. --- src/error.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/error.rs b/src/error.rs index c0d9622d50..db0c536b57 100644 --- a/src/error.rs +++ b/src/error.rs @@ -17,11 +17,12 @@ struct ErrorImpl { cause: Option, } -#[derive(Debug, PartialEq)] +#[derive(Debug)] pub(super) enum Kind { Parse(Parse), User(User), /// A message reached EOF, but is not complete. + #[allow(unused)] IncompleteMessage, /// A connection received a message (or bytes) when not waiting for one. 
#[cfg(feature = "http1")] @@ -34,6 +35,7 @@ pub(super) enum Kind { #[cfg(any(feature = "http1", feature = "http2"))] Io, /// Error occurred while connecting. + #[allow(unused)] Connect, /// Error creating a TcpListener. #[cfg(all( @@ -63,7 +65,7 @@ pub(super) enum Kind { Http2, } -#[derive(Debug, PartialEq)] +#[derive(Debug)] pub(super) enum Parse { Method, Version, @@ -77,7 +79,7 @@ pub(super) enum Parse { Internal, } -#[derive(Debug, PartialEq)] +#[derive(Debug)] pub(super) enum User { /// Error calling user's HttpBody::poll_data(). #[cfg(any(feature = "http1", feature = "http2"))] @@ -152,27 +154,27 @@ impl Error { /// Returns true if this was about a `Request` that was canceled. pub fn is_canceled(&self) -> bool { - self.inner.kind == Kind::Canceled + matches!(self.inner.kind, Kind::Canceled) } /// Returns true if a sender's channel is closed. pub fn is_closed(&self) -> bool { - self.inner.kind == Kind::ChannelClosed + matches!(self.inner.kind, Kind::ChannelClosed) } /// Returns true if this was an error from `Connect`. pub fn is_connect(&self) -> bool { - self.inner.kind == Kind::Connect + matches!(self.inner.kind, Kind::Connect) } /// Returns true if the connection closed before a message could complete. pub fn is_incomplete_message(&self) -> bool { - self.inner.kind == Kind::IncompleteMessage + matches!(self.inner.kind, Kind::IncompleteMessage) } /// Returns true if the body write was aborted. pub fn is_body_write_aborted(&self) -> bool { - self.inner.kind == Kind::BodyWriteAborted + matches!(self.inner.kind, Kind::BodyWriteAborted) } /// Returns true if the error was caused by a timeout. From 08b2138e4036c5ae3e4c6f5c85763d45fb869922 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 8 Jun 2021 09:44:35 -0700 Subject: [PATCH 079/420] refactor(error): add header parse error details in `hyper::Error` When a header parse error is because of content-length or transfer-encoding semantics, include a better error message in the `hyper::Error`. --- src/error.rs | 44 +++++++++++++++++++++++++++++++++++++++++--- src/proto/h1/role.rs | 16 ++++++++-------- 2 files changed, 49 insertions(+), 11 deletions(-) diff --git a/src/error.rs b/src/error.rs index db0c536b57..3eb6243701 100644 --- a/src/error.rs +++ b/src/error.rs @@ -72,13 +72,24 @@ pub(super) enum Parse { #[cfg(feature = "http1")] VersionH2, Uri, - Header, + Header(Header), TooLarge, Status, #[cfg_attr(debug_assertions, allow(unused))] Internal, } +#[derive(Debug)] +pub(super) enum Header { + Token, + #[cfg(feature = "http1")] + ContentLengthInvalid, + #[cfg(feature = "http1")] + TransferEncodingInvalid, + #[cfg(feature = "http1")] + TransferEncodingUnexpected, +} + #[derive(Debug)] pub(super) enum User { /// Error calling user's HttpBody::poll_data(). 
@@ -375,7 +386,19 @@ impl Error { #[cfg(feature = "http1")] Kind::Parse(Parse::VersionH2) => "invalid HTTP version parsed (found HTTP2 preface)", Kind::Parse(Parse::Uri) => "invalid URI", - Kind::Parse(Parse::Header) => "invalid HTTP header parsed", + Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed", + #[cfg(feature = "http1")] + Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => { + "invalid content-length parsed" + } + #[cfg(feature = "http1")] + Kind::Parse(Parse::Header(Header::TransferEncodingInvalid)) => { + "invalid transfer-encoding parsed" + } + #[cfg(feature = "http1")] + Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => { + "unexpected transfer-encoding parsed" + } Kind::Parse(Parse::TooLarge) => "message head is too large", Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed", Kind::Parse(Parse::Internal) => { @@ -475,13 +498,28 @@ impl From for Error { } } +#[cfg(feature = "http1")] +impl Parse { + pub(crate) fn content_length_invalid() -> Self { + Parse::Header(Header::ContentLengthInvalid) + } + + pub(crate) fn transfer_encoding_invalid() -> Self { + Parse::Header(Header::TransferEncodingInvalid) + } + + pub(crate) fn transfer_encoding_unexpected() -> Self { + Parse::Header(Header::TransferEncodingUnexpected) + } +} + impl From for Parse { fn from(err: httparse::Error) -> Parse { match err { httparse::Error::HeaderName | httparse::Error::HeaderValue | httparse::Error::NewLine - | httparse::Error::Token => Parse::Header, + | httparse::Error::Token => Parse::Header(Header::Token), httparse::Error::Status => Parse::Status, httparse::Error::TooManyHeaders => Parse::TooLarge, httparse::Error::Version => Parse::Version, diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 0d59edf2d6..f76f6cfef6 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -205,7 +205,7 @@ impl Http1Transaction for Server { // malformed. A server should respond with 400 Bad Request. if !is_http_11 { debug!("HTTP/1.0 cannot have Transfer-Encoding header"); - return Err(Parse::Header); + return Err(Parse::transfer_encoding_unexpected()); } is_te = true; if headers::is_chunked_(&value) { @@ -221,15 +221,15 @@ impl Http1Transaction for Server { } let len = value .to_str() - .map_err(|_| Parse::Header) - .and_then(|s| s.parse().map_err(|_| Parse::Header))?; + .map_err(|_| Parse::content_length_invalid()) + .and_then(|s| s.parse().map_err(|_| Parse::content_length_invalid()))?; if let Some(prev) = con_len { if prev != len { debug!( "multiple Content-Length headers with different values: [{}, {}]", prev, len, ); - return Err(Parse::Header); + return Err(Parse::content_length_invalid()); } // we don't need to append this secondary length continue; @@ -267,7 +267,7 @@ impl Http1Transaction for Server { if is_te && !is_te_chunked { debug!("request with transfer-encoding header, but not chunked, bad request"); - return Err(Parse::Header); + return Err(Parse::transfer_encoding_invalid()); } let mut extensions = http::Extensions::default(); @@ -386,7 +386,7 @@ impl Http1Transaction for Server { use crate::error::Kind; let status = match *err.kind() { Kind::Parse(Parse::Method) - | Kind::Parse(Parse::Header) + | Kind::Parse(Parse::Header(_)) | Kind::Parse(Parse::Uri) | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST, Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE, @@ -1106,7 +1106,7 @@ impl Client { // malformed. A server should respond with 400 Bad Request. 
if inc.version == Version::HTTP_10 { debug!("HTTP/1.0 cannot have Transfer-Encoding header"); - Err(Parse::Header) + Err(Parse::transfer_encoding_unexpected()) } else if headers::transfer_encoding_is_chunked(&inc.headers) { Ok(Some((DecodedLength::CHUNKED, false))) } else { @@ -1117,7 +1117,7 @@ impl Client { Ok(Some((DecodedLength::checked_new(len)?, false))) } else if inc.headers.contains_key(header::CONTENT_LENGTH) { debug!("illegal Content-Length header"); - Err(Parse::Header) + Err(Parse::content_length_invalid()) } else { trace!("neither Transfer-Encoding nor Content-Length"); Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) From 8c89a8c1665b6fbec3f13b8c0e84c79464179c89 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 11 Jun 2021 14:12:56 -0700 Subject: [PATCH 080/420] feat(ffi): add option to get raw headers from response --- capi/include/hyper.h | 26 ++++++++++++++++++++++++++ src/client/conn.rs | 14 ++++++++++++++ src/ffi/body.rs | 2 +- src/ffi/client.rs | 14 ++++++++++++++ src/ffi/http_types.rs | 24 +++++++++++++++++++++++- src/proto/h1/conn.rs | 11 +++++++++++ src/proto/h1/io.rs | 4 ++++ src/proto/h1/mod.rs | 2 ++ src/proto/h1/role.rs | 37 +++++++++++++++++++++++++++++++++++++ 9 files changed, 132 insertions(+), 2 deletions(-) diff --git a/capi/include/hyper.h b/capi/include/hyper.h index a305dc4a09..452512362e 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -371,6 +371,17 @@ void hyper_clientconn_options_exec(struct hyper_clientconn_options *opts, */ enum hyper_code hyper_clientconn_options_http2(struct hyper_clientconn_options *opts, int enabled); +/* + Set the whether to include a copy of the raw headers in responses + received on this connection. + + Pass `0` to disable, `1` to enable. + + If enabled, see `hyper_response_headers_raw()` for usage. + */ +enum hyper_code hyper_clientconn_options_headers_raw(struct hyper_clientconn_options *opts, + int enabled); + /* Frees a `hyper_error`. */ @@ -475,6 +486,21 @@ const uint8_t *hyper_response_reason_phrase(const struct hyper_response *resp); */ size_t hyper_response_reason_phrase_len(const struct hyper_response *resp); +/* + Get a reference to the full raw headers of this response. + + You must have enabled `hyper_clientconn_options_headers_raw()`, or this + will return NULL. + + The returned `hyper_buf *` is just a reference, owned by the response. + You need to make a copy if you wish to use it after freeing the + response. + + The buffer is not null-terminated, see the `hyper_buf` functions for + getting the bytes and length. + */ +const struct hyper_buf *hyper_response_headers_raw(const struct hyper_response *resp); + /* Get the HTTP version used by this response. 
diff --git a/src/client/conn.rs b/src/client/conn.rs index c6170007a2..c557ee29c2 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -147,6 +147,8 @@ pub struct Builder { h1_preserve_header_case: bool, h1_read_buf_exact_size: Option, h1_max_buf_size: Option, + #[cfg(feature = "ffi")] + h1_headers_raw: bool, #[cfg(feature = "http2")] h2_builder: proto::h2::client::Config, version: Proto, @@ -528,6 +530,8 @@ impl Builder { h1_title_case_headers: false, h1_preserve_header_case: false, h1_max_buf_size: None, + #[cfg(feature = "ffi")] + h1_headers_raw: false, #[cfg(feature = "http2")] h2_builder: Default::default(), #[cfg(feature = "http1")] @@ -588,6 +592,12 @@ impl Builder { self } + #[cfg(feature = "ffi")] + pub(crate) fn http1_headers_raw(&mut self, enabled: bool) -> &mut Self { + self.h1_headers_raw = enabled; + self + } + /// Sets whether HTTP2 is required. /// /// Default is false. @@ -773,6 +783,10 @@ impl Builder { if opts.h09_responses { conn.set_h09_responses(); } + + #[cfg(feature = "ffi")] + conn.set_raw_headers(opts.h1_headers_raw); + if let Some(sz) = opts.h1_read_buf_exact_size { conn.set_read_buf_exact_size(sz); } diff --git a/src/ffi/body.rs b/src/ffi/body.rs index f942b769e5..d6e1394371 100644 --- a/src/ffi/body.rs +++ b/src/ffi/body.rs @@ -14,7 +14,7 @@ use crate::body::{Body, Bytes, HttpBody as _}; pub struct hyper_body(pub(super) Body); /// A buffer of bytes that is sent or received on a `hyper_body`. -pub struct hyper_buf(pub(super) Bytes); +pub struct hyper_buf(pub(crate) Bytes); pub(crate) struct UserBody { data_func: hyper_body_data_callback, diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 9be4f5a04d..6fa8862ddd 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -159,3 +159,17 @@ ffi_fn! { } } } + +ffi_fn! { + /// Set the whether to include a copy of the raw headers in responses + /// received on this connection. + /// + /// Pass `0` to disable, `1` to enable. + /// + /// If enabled, see `hyper_response_headers_raw()` for usage. + fn hyper_clientconn_options_headers_raw(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { + let opts = unsafe { &mut *opts }; + opts.builder.http1_headers_raw(enabled != 0); + hyper_code::HYPERE_OK + } +} diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index 924944835b..e192e4bc0a 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -2,7 +2,7 @@ use bytes::Bytes; use libc::{c_int, size_t}; use std::ffi::c_void; -use super::body::hyper_body; +use super::body::{hyper_body, hyper_buf}; use super::error::hyper_code; use super::task::{hyper_task_return_type, AsTaskType}; use super::HYPER_ITER_CONTINUE; @@ -27,6 +27,8 @@ pub struct hyper_headers { #[derive(Debug)] pub(crate) struct ReasonPhrase(pub(crate) Bytes); +pub(crate) struct RawHeaders(pub(crate) hyper_buf); + // ===== impl hyper_request ===== ffi_fn! { @@ -178,6 +180,26 @@ ffi_fn! { } } +ffi_fn! { + /// Get a reference to the full raw headers of this response. + /// + /// You must have enabled `hyper_clientconn_options_headers_raw()`, or this + /// will return NULL. + /// + /// The returned `hyper_buf *` is just a reference, owned by the response. + /// You need to make a copy if you wish to use it after freeing the + /// response. + /// + /// The buffer is not null-terminated, see the `hyper_buf` functions for + /// getting the bytes and length. 
+ fn hyper_response_headers_raw(resp: *const hyper_response) -> *const hyper_buf { + match unsafe { &*resp }.0.extensions().get::() { + Some(raw) => &raw.0, + None => std::ptr::null(), + } + } ?= std::ptr::null() +} + ffi_fn! { /// Get the HTTP version used by this response. /// diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index dd5397880d..cb8fdd0ac9 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -49,6 +49,8 @@ where preserve_header_case: false, title_case_headers: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, notify_read: false, reading: Reading::Init, writing: Writing::Init, @@ -98,6 +100,11 @@ where self.state.allow_half_close = true; } + #[cfg(feature = "ffi")] + pub(crate) fn set_raw_headers(&mut self, enabled: bool) { + self.state.raw_headers = enabled; + } + pub(crate) fn into_inner(self) -> (I, Bytes) { self.io.into_inner() } @@ -162,6 +169,8 @@ where h1_parser_config: self.state.h1_parser_config.clone(), preserve_header_case: self.state.preserve_header_case, h09_responses: self.state.h09_responses, + #[cfg(feature = "ffi")] + raw_headers: self.state.raw_headers, } )) { Ok(msg) => msg, @@ -766,6 +775,8 @@ struct State { preserve_header_case: bool, title_case_headers: bool, h09_responses: bool, + #[cfg(feature = "ffi")] + raw_headers: bool, /// Set to true when the Dispatcher should poll read operations /// again. See the `maybe_notify` method for more. notify_read: bool, diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 4e6efba680..4adc6c4419 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -167,6 +167,8 @@ where h1_parser_config: parse_ctx.h1_parser_config.clone(), preserve_header_case: parse_ctx.preserve_header_case, h09_responses: parse_ctx.h09_responses, + #[cfg(feature = "ffi")] + raw_headers: parse_ctx.raw_headers, }, )? 
{ Some(msg) => { @@ -675,6 +677,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }; assert!(buffered .parse::(cx, parse_ctx) diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs index 3871277c25..b3f62f911b 100644 --- a/src/proto/h1/mod.rs +++ b/src/proto/h1/mod.rs @@ -74,6 +74,8 @@ pub(crate) struct ParseContext<'a> { h1_parser_config: ParserConfig, preserve_header_case: bool, h09_responses: bool, + #[cfg(feature = "ffi")] + raw_headers: bool, } /// Passed to Http1Transaction::encode diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index f76f6cfef6..c8174044b3 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -970,6 +970,11 @@ impl Http1Transaction for Client { #[cfg(not(feature = "ffi"))] drop(reason); + #[cfg(feature = "ffi")] + if ctx.raw_headers { + extensions.insert(crate::ffi::RawHeaders(crate::ffi::hyper_buf(slice))); + } + let head = MessageHead { version, subject: status, @@ -1424,6 +1429,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .unwrap() @@ -1447,6 +1454,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw.len(), 0); @@ -1465,6 +1474,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }; Server::parse(&mut raw, ctx).unwrap_err(); } @@ -1481,6 +1492,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: true, + #[cfg(feature = "ffi")] + raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw, H09_RESPONSE); @@ -1499,6 +1512,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }; Client::parse(&mut raw, ctx).unwrap_err(); assert_eq!(raw, H09_RESPONSE); @@ -1521,6 +1536,8 @@ mod tests { h1_parser_config, preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); assert_eq!(raw.len(), 0); @@ -1540,6 +1557,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }; Client::parse(&mut raw, ctx).unwrap_err(); } @@ -1554,6 +1573,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: true, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }; let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap(); let orig_headers = parsed_message @@ -1589,6 +1610,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect("parse ok") @@ -1605,6 +1628,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect_err(comment) @@ -1820,6 +1845,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, } ) .expect("parse ok") @@ -1836,6 +1863,8 @@ mod tests { 
h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect("parse ok") @@ -1852,6 +1881,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect_err("parse should err") @@ -2335,6 +2366,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .expect("parse ok") @@ -2415,6 +2448,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .unwrap() @@ -2451,6 +2486,8 @@ mod tests { h1_parser_config: Default::default(), preserve_header_case: false, h09_responses: false, + #[cfg(feature = "ffi")] + raw_headers: false, }, ) .unwrap() From c60a9dd9c9d69985676fb333eb9ad6486c480d93 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 24 Jun 2021 10:21:21 -0700 Subject: [PATCH 081/420] chore(github): add issue templates --- .github/ISSUE_TEMPLATE/bug-report---.md | 28 ++++++++++++++++++++ .github/ISSUE_TEMPLATE/bug_report.md | 28 ++++++++++++++++++++ .github/ISSUE_TEMPLATE/feature-request---.md | 20 ++++++++++++++ .github/ISSUE_TEMPLATE/feature_request.md | 20 ++++++++++++++ 4 files changed, 96 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug-report---.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature-request---.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md diff --git a/.github/ISSUE_TEMPLATE/bug-report---.md b/.github/ISSUE_TEMPLATE/bug-report---.md new file mode 100644 index 0000000000..62d8ebc1cb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report---.md @@ -0,0 +1,28 @@ +--- +name: "Bug report \U0001F41B" +about: Create a report to help us improve +title: '' +labels: S-bug +assignees: '' + +--- + +**Version** +List the version(s) of `hyper`, and any relevant hyper dependency (such as `h2` if this is related to HTTP/2). + +**Platform** +The output of `uname -a` (UNIX), or version and 32 or 64-bit (Windows) + +**Description** +Enter your issue details here. +One way to structure the description: + +[short summary of the bug] + +I tried this code: + +[code sample that causes the bug] + +I expected to see this happen: [explanation] + +Instead, this happened: [explanation] diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..8ed604a446 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,28 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: S-bug +assignees: '' + +--- + +**Version** +List the version(s) of `hyper`, and any relevant hyper dependency (such as `h2` if this is related to HTTP/2). + +**Platform** +The output of `uname -a` (UNIX), or version and 32 or 64-bit (Windows) + +**Description** +Enter your issue details here. 
+One way to structure the description: + +[short summary of the bug] + +I tried this code: + +[code sample that causes the bug] + +I expected to see this happen: [explanation] + +Instead, this happened: [explanation] diff --git a/.github/ISSUE_TEMPLATE/feature-request---.md b/.github/ISSUE_TEMPLATE/feature-request---.md new file mode 100644 index 0000000000..424e5ba9be --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request---.md @@ -0,0 +1,20 @@ +--- +name: "Feature request \U0001F4A1" +about: Suggest an idea for this project +title: '' +labels: S-feature +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..53f29830e3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: S-feature +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. From 13594c377d4fce2b5bff4749eefebf0370af1268 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 24 Jun 2021 10:23:39 -0700 Subject: [PATCH 082/420] chore(github): merge duplicate issue templates --- .github/ISSUE_TEMPLATE/bug-report---.md | 28 -------------------- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- .github/ISSUE_TEMPLATE/feature-request---.md | 20 -------------- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- 4 files changed, 2 insertions(+), 50 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug-report---.md delete mode 100644 .github/ISSUE_TEMPLATE/feature-request---.md diff --git a/.github/ISSUE_TEMPLATE/bug-report---.md b/.github/ISSUE_TEMPLATE/bug-report---.md deleted file mode 100644 index 62d8ebc1cb..0000000000 --- a/.github/ISSUE_TEMPLATE/bug-report---.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -name: "Bug report \U0001F41B" -about: Create a report to help us improve -title: '' -labels: S-bug -assignees: '' - ---- - -**Version** -List the version(s) of `hyper`, and any relevant hyper dependency (such as `h2` if this is related to HTTP/2). - -**Platform** -The output of `uname -a` (UNIX), or version and 32 or 64-bit (Windows) - -**Description** -Enter your issue details here. 
-One way to structure the description: - -[short summary of the bug] - -I tried this code: - -[code sample that causes the bug] - -I expected to see this happen: [explanation] - -Instead, this happened: [explanation] diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 8ed604a446..62d8ebc1cb 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,5 +1,5 @@ --- -name: Bug report +name: "Bug report \U0001F41B" about: Create a report to help us improve title: '' labels: S-bug diff --git a/.github/ISSUE_TEMPLATE/feature-request---.md b/.github/ISSUE_TEMPLATE/feature-request---.md deleted file mode 100644 index 424e5ba9be..0000000000 --- a/.github/ISSUE_TEMPLATE/feature-request---.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: "Feature request \U0001F4A1" -about: Suggest an idea for this project -title: '' -labels: S-feature -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 53f29830e3..424e5ba9be 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,5 +1,5 @@ --- -name: Feature request +name: "Feature request \U0001F4A1" about: Suggest an idea for this project title: '' labels: S-feature From 11cb4725ad2065ce1650b7187be3d918219176c5 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 7 Jul 2021 09:56:56 -0700 Subject: [PATCH 083/420] refactor(http2): fix unstable name clash of Cursor::remaining --- src/proto/h2/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index b410bab60c..4b66b1880f 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -255,7 +255,7 @@ impl Buf for SendBuf { fn remaining(&self) -> usize { match *self { Self::Buf(ref b) => b.remaining(), - Self::Cursor(ref c) => c.remaining(), + Self::Cursor(ref c) => Buf::remaining(c), Self::None => 0, } } From 1068b994df1aec2a0a562224965a80ddced50e60 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 1 Jul 2021 12:34:38 -0700 Subject: [PATCH 084/420] fix(http1): protect against overflow in chunked decoder The HTTP/1 chunked decoder, when decoding the size of a chunk, could overflow the size if the hex digits were too large. This fixes it by adding an overflow check in the decoder. See GHSA-5h46-h7hh-c6x9 --- src/proto/h1/decode.rs | 29 ++++++++++++++++++++++------- tests/server.rs | 29 +++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs index dad015e092..5d1c1c3c1b 100644 --- a/src/proto/h1/decode.rs +++ b/src/proto/h1/decode.rs @@ -208,19 +208,32 @@ impl ChunkedState { size: &mut u64, ) -> Poll> { trace!("Read chunk hex size"); + + macro_rules! 
or_overflow { + ($e:expr) => ( + match $e { + Some(val) => val, + None => return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "invalid chunk size: overflow", + ))), + } + ) + } + let radix = 16; match byte!(rdr, cx) { b @ b'0'..=b'9' => { - *size *= radix; - *size += (b - b'0') as u64; + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b - b'0') as u64)); } b @ b'a'..=b'f' => { - *size *= radix; - *size += (b + 10 - b'a') as u64; + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64)); } b @ b'A'..=b'F' => { - *size *= radix; - *size += (b + 10 - b'A') as u64; + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64)); } b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)), b';' => return Poll::Ready(Ok(ChunkedState::Extension)), @@ -449,7 +462,7 @@ mod tests { #[tokio::test] async fn test_read_chunk_size() { - use std::io::ErrorKind::{InvalidInput, UnexpectedEof}; + use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof}; async fn read(s: &str) -> u64 { let mut state = ChunkedState::Size; @@ -524,6 +537,8 @@ mod tests { read_err("1 invalid extension\r\n", InvalidInput).await; read_err("1 A\r\n", InvalidInput).await; read_err("1;no CRLF", UnexpectedEof).await; + // Overflow + read_err("f0000000000000003\r\n", InvalidData).await; } #[tokio::test] diff --git a/tests/server.rs b/tests/server.rs index 297b09ac73..ce65849f90 100644 --- a/tests/server.rs +++ b/tests/server.rs @@ -431,6 +431,35 @@ fn post_with_chunked_body() { assert_eq!(server.body(), b"qwert"); } +#[test] +fn post_with_chunked_overflow() { + let server = serve(); + let mut req = connect(server.addr()); + req.write_all( + b"\ + POST / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + f0000000000000003\r\n\ + abc\r\n\ + 0\r\n\ + \r\n\ + GET /sneaky HTTP/1.1\r\n\ + \r\n\ + ", + ) + .unwrap(); + req.read(&mut [0; 256]).unwrap(); + + let err = server.body_err().to_string(); + assert!( + err.contains("overflow"), + "error should be overflow: {:?}", + err + ); +} + #[test] fn post_with_incomplete_body() { let _ = pretty_env_logger::try_init(); From 1fb719e0b61a4f3d911562a436a2ff05fd7cb759 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 1 Jul 2021 12:36:41 -0700 Subject: [PATCH 085/420] fix(http1): reject content-lengths that have a plus sign prefix The HTTP/1 content-length parser would accept lengths that were prefixed with a plus sign (for example, `+1234`). The specification restricts the content-length header to only allow DIGITs, making such a content-length illegal. Since some HTTP implementations protect against that, and others mis-interpret the length when the plus sign is present, this fixes hyper to always reject such content lengths. 
See GHSA-f3pg-qwvg-p99c --- src/headers.rs | 31 +++++++++++++++++++++++++++++-- src/proto/h1/role.rs | 24 ++++++++++++++++++++---- tests/server.rs | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+), 6 deletions(-) diff --git a/src/headers.rs b/src/headers.rs index 0d16cdfd5a..8407be185f 100644 --- a/src/headers.rs +++ b/src/headers.rs @@ -30,7 +30,7 @@ fn connection_has(value: &HeaderValue, needle: &str) -> bool { #[cfg(all(feature = "http1", feature = "server"))] pub(super) fn content_length_parse(value: &HeaderValue) -> Option { - value.to_str().ok().and_then(|s| s.parse().ok()) + from_digits(value.as_bytes()) } pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option { @@ -46,7 +46,7 @@ pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue> for h in values { if let Ok(line) = h.to_str() { for v in line.split(',') { - if let Some(n) = v.trim().parse().ok() { + if let Some(n) = from_digits(v.trim().as_bytes()) { if content_length.is_none() { content_length = Some(n) } else if content_length != Some(n) { @@ -64,6 +64,33 @@ pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue> return content_length } +fn from_digits(bytes: &[u8]) -> Option { + // cannot use FromStr for u64, since it allows a signed prefix + let mut result = 0u64; + const RADIX: u64 = 10; + + if bytes.is_empty() { + return None; + } + + for &b in bytes { + // can't use char::to_digit, since we haven't verified these bytes + // are utf-8. + match b { + b'0'..=b'9' => { + result = result.checked_mul(RADIX)?; + result = result.checked_add((b - b'0') as u64)?; + }, + _ => { + // not a DIGIT, get outta here! + return None; + } + } + } + + Some(result) +} + #[cfg(all(feature = "http2", feature = "client"))] pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool { match *method { diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index c8174044b3..098febab3e 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -219,10 +219,8 @@ impl Http1Transaction for Server { if is_te { continue; } - let len = value - .to_str() - .map_err(|_| Parse::content_length_invalid()) - .and_then(|s| s.parse().map_err(|_| Parse::content_length_invalid()))?; + let len = headers::content_length_parse(&value) + .ok_or_else(Parse::content_length_invalid)?; if let Some(prev) = con_len { if prev != len { debug!( @@ -1775,6 +1773,16 @@ mod tests { "multiple content-lengths", ); + // content-length with prefix is not allowed + parse_err( + "\ + POST / HTTP/1.1\r\n\ + content-length: +10\r\n\ + \r\n\ + ", + "prefixed content-length", + ); + // transfer-encoding that isn't chunked is an error parse_err( "\ @@ -1958,6 +1966,14 @@ mod tests { ", ); + parse_err( + "\ + HTTP/1.1 200 OK\r\n\ + content-length: +8\r\n\ + \r\n\ + ", + ); + // transfer-encoding: chunked assert_eq!( parse( diff --git a/tests/server.rs b/tests/server.rs index ce65849f90..624c1eb8e7 100644 --- a/tests/server.rs +++ b/tests/server.rs @@ -405,6 +405,44 @@ fn get_chunked_response_with_ka() { read_until(&mut req, |buf| buf.ends_with(quux)).expect("reading 2"); } +#[test] +fn post_with_content_length_body() { + let server = serve(); + let mut req = connect(server.addr()); + req.write_all( + b"\ + POST / HTTP/1.1\r\n\ + Content-Length: 5\r\n\ + \r\n\ + hello\ + ", + ) + .unwrap(); + req.read(&mut [0; 256]).unwrap(); + + assert_eq!(server.body(), b"hello"); +} + +#[test] +fn post_with_invalid_prefix_content_length() { + let server = serve(); + let mut req = 
connect(server.addr()); + req.write_all( + b"\ + POST / HTTP/1.1\r\n\ + Content-Length: +5\r\n\ + \r\n\ + hello\ + ", + ) + .unwrap(); + + let mut buf = [0; 256]; + let _n = req.read(&mut buf).unwrap(); + let expected = "HTTP/1.1 400 Bad Request\r\n"; + assert_eq!(s(&buf[..expected.len()]), expected); +} + #[test] fn post_with_chunked_body() { let server = serve(); From 9b69cbc2543e437f17c4a028b734f55928358675 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 7 Jul 2021 10:26:36 -0700 Subject: [PATCH 086/420] v0.14.10 --- CHANGELOG.md | 15 +++++++++++++++ Cargo.toml | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bcf18b3895..8aa87e008b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,18 @@ +### v0.14.10 (2021-07-07) + + +#### Bug Fixes + +* **http1:** + * reject content-lengths that have a plus sign prefix ([06335158](https://github.com/hyperium/hyper/commit/06335158ca48724db9bf074398067d2db08613e7)) + * protect against overflow in chunked decoder ([efd9a982](https://github.com/hyperium/hyper/commit/efd9a9821fd2f1ae04b545094de76a435b62e70f)) + + +#### Features + +* **ffi:** add option to get raw headers from response ([8c89a8c1](https://github.com/hyperium/hyper/commit/8c89a8c1665b6fbec3f13b8c0e84c79464179c89)) + + ### v0.14.9 (2021-06-07) diff --git a/Cargo.toml b/Cargo.toml index 688b209ad3..1332aa448f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.9" # don't forget to update html_root_url +version = "0.14.10" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" From 1cd40b7e26e6367a2693aa4bb540f5b2816fea8c Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 7 Jul 2021 11:49:24 -0700 Subject: [PATCH 087/420] docs(README): update crates.io badge --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d6a4584de8..c3c73d7ed7 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # [hyper](https://hyper.rs) -[![crates.io](https://meritbadge.herokuapp.com/hyper)](https://crates.io/crates/hyper) +[![crates.io](https://img.shields.io/crates/v/hyper.svg)](https://crates.io/crates/hyper) [![Released API docs](https://docs.rs/hyper/badge.svg)](https://docs.rs/hyper) [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) [![CI](https://github.com/hyperium/hyper/workflows/CI/badge.svg)](https://github.com/hyperium/hyper/actions?query=workflow%3ACI) From 25d18c0b74ccf9e51f986daa3b2b98c0109f827a Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 8 Jul 2021 11:32:39 -0700 Subject: [PATCH 088/420] feat(ffi): add hyper_request_on_informational This defines an extension type used in requests for the client that is used to setup a callback for receipt of informational (1xx) responses. The type isn't currently public, and is only usable in the C API. 
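A minimal sketch of the intended usage from C, assuming a `hyper_request *req` that has already been created as in the `upload.c` example further down this patch (the callback name below is illustrative, not part of the patch):

#include <stdio.h>
#include "hyper.h"

/* Called once for each 1xx response received for this request; `resp` is
   only borrowed for the duration of the callback and must not be stored. */
static void on_informational(void *userdata, const hyper_response *resp) {
    printf("informational (1xx) status: %d\n", hyper_response_status(resp));
}

/* While building the request:
       hyper_request_on_informational(req, on_informational, NULL);
*/
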
--- capi/examples/upload.c | 17 ++++++++++++++-- capi/include/hyper.h | 23 +++++++++++++++++++++ src/ffi/http_types.rs | 44 +++++++++++++++++++++++++++++++++++++++- src/ffi/mod.rs | 1 + src/proto/h1/conn.rs | 23 +++++++++++++++++++++ src/proto/h1/dispatch.rs | 6 +----- src/proto/h1/io.rs | 4 ++++ src/proto/h1/mod.rs | 2 ++ src/proto/h1/role.rs | 39 +++++++++++++++++++++++++++++++++++ src/proto/mod.rs | 11 ++++++++++ 10 files changed, 162 insertions(+), 8 deletions(-) diff --git a/capi/examples/upload.c b/capi/examples/upload.c index ed6f37a709..caf56aafd3 100644 --- a/capi/examples/upload.c +++ b/capi/examples/upload.c @@ -148,6 +148,16 @@ static int print_each_header(void *userdata, return HYPER_ITER_CONTINUE; } +static void print_informational(void *userdata, hyper_response *resp) { + uint16_t http_status = hyper_response_status(resp); + + printf("\nInformational (1xx): %d\n", http_status); + + hyper_headers *headers = hyper_response_headers(resp); + hyper_headers_foreach(headers, print_each_header, NULL); + printf("\n"); +} + typedef enum { EXAMPLE_NOT_SET = 0, // tasks we don't know about won't have a userdata set EXAMPLE_HANDSHAKE, @@ -172,7 +182,7 @@ int main(int argc, char *argv[]) { upload.fd = open(file, O_RDONLY); if (upload.fd < 0) { - printf("error opening file to upload: %d", errno); + printf("error opening file to upload: %s\n", strerror(errno)); return 1; } printf("connecting to port %s on %s...\n", port, host); @@ -262,7 +272,10 @@ int main(int argc, char *argv[]) { } hyper_headers *req_headers = hyper_request_headers(req); - hyper_headers_set(req_headers, STR_ARG("host"), STR_ARG(host)); + hyper_headers_set(req_headers, STR_ARG("host"), STR_ARG(host)); + hyper_headers_set(req_headers, STR_ARG("expect"), STR_ARG("100-continue")); + + hyper_request_on_informational(req, print_informational, NULL); // Prepare the req body hyper_body *body = hyper_body_new(); diff --git a/capi/include/hyper.h b/capi/include/hyper.h index 452512362e..fc3b71e71d 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -207,6 +207,8 @@ typedef int (*hyper_body_foreach_callback)(void*, const struct hyper_buf*); typedef int (*hyper_body_data_callback)(void*, struct hyper_context*, struct hyper_buf**); +typedef void (*hyper_request_on_informational_callback)(void*, const struct hyper_response*); + typedef int (*hyper_headers_foreach_callback)(void*, const uint8_t*, size_t, const uint8_t*, size_t); typedef size_t (*hyper_io_read_callback)(void*, struct hyper_context*, uint8_t*, size_t); @@ -454,6 +456,27 @@ struct hyper_headers *hyper_request_headers(struct hyper_request *req); */ enum hyper_code hyper_request_set_body(struct hyper_request *req, struct hyper_body *body); +/* + Set an informational (1xx) response callback. + + The callback is called each time hyper receives an informational (1xx) + response for this request. + + The third argument is an opaque user data pointer, which is passed to + the callback each time. + + The callback is passed the `void *` data pointer, and a + `hyper_response *` which can be inspected as any other response. The + body of the response will always be empty. + + NOTE: The `const hyper_response *` is just borrowed data, and will not + be valid after the callback finishes. You must copy any data you wish + to persist. + */ +enum hyper_code hyper_request_on_informational(struct hyper_request *req, + hyper_request_on_informational_callback callback, + void *data); + /* Free an HTTP response after using it. 
*/ diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index e192e4bc0a..e26557cebf 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -5,7 +5,7 @@ use std::ffi::c_void; use super::body::{hyper_body, hyper_buf}; use super::error::hyper_code; use super::task::{hyper_task_return_type, AsTaskType}; -use super::HYPER_ITER_CONTINUE; +use super::{UserDataPointer, HYPER_ITER_CONTINUE}; use crate::ext::HeaderCaseMap; use crate::header::{HeaderName, HeaderValue}; use crate::{Body, HeaderMap, Method, Request, Response, Uri}; @@ -29,6 +29,13 @@ pub(crate) struct ReasonPhrase(pub(crate) Bytes); pub(crate) struct RawHeaders(pub(crate) hyper_buf); +pub(crate) struct OnInformational { + func: hyper_request_on_informational_callback, + data: UserDataPointer, +} + +type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *const hyper_response); + // ===== impl hyper_request ===== ffi_fn! { @@ -129,6 +136,32 @@ ffi_fn! { } } +ffi_fn! { + /// Set an informational (1xx) response callback. + /// + /// The callback is called each time hyper receives an informational (1xx) + /// response for this request. + /// + /// The third argument is an opaque user data pointer, which is passed to + /// the callback each time. + /// + /// The callback is passed the `void *` data pointer, and a + /// `hyper_response *` which can be inspected as any other response. The + /// body of the response will always be empty. + /// + /// NOTE: The `const hyper_response *` is just borrowed data, and will not + /// be valid after the callback finishes. You must copy any data you wish + /// to persist. + fn hyper_request_on_informational(req: *mut hyper_request, callback: hyper_request_on_informational_callback, data: *mut c_void) -> hyper_code { + let ext = OnInformational { + func: callback, + data: UserDataPointer(data), + }; + unsafe { &mut *req }.0.extensions_mut().insert(ext); + hyper_code::HYPERE_OK + } +} + impl hyper_request { pub(super) fn finalize_request(&mut self) { if let Some(headers) = self.0.extensions_mut().remove::() { @@ -394,6 +427,15 @@ unsafe fn raw_name_value( Ok((name, value, orig_name)) } +// ===== impl OnInformational ===== + +impl OnInformational { + pub(crate) fn call(&mut self, resp: Response) { + let mut resp = hyper_response(resp); + (self.func)(self.data.0, &mut resp); + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index 83011ff0fc..fd67a880a6 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -81,6 +81,7 @@ struct UserDataPointer(*mut std::ffi::c_void); // We don't actually know anything about this pointer, it's up to the user // to do the right thing. 
unsafe impl Send for UserDataPointer {} +unsafe impl Sync for UserDataPointer {} /// cbindgen:ignore static VERSION_CSTR: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index cb8fdd0ac9..f3eb01addb 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -50,6 +50,8 @@ where title_case_headers: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: None, + #[cfg(feature = "ffi")] raw_headers: false, notify_read: false, reading: Reading::Init, @@ -170,6 +172,8 @@ where preserve_header_case: self.state.preserve_header_case, h09_responses: self.state.h09_responses, #[cfg(feature = "ffi")] + on_informational: &mut self.state.on_informational, + #[cfg(feature = "ffi")] raw_headers: self.state.raw_headers, } )) { @@ -185,6 +189,12 @@ where // Prevent accepting HTTP/0.9 responses after the initial one, if any. self.state.h09_responses = false; + // Drop any OnInformational callbacks, we're done there! + #[cfg(feature = "ffi")] + { + self.state.on_informational = None; + } + self.state.busy(); self.state.keep_alive &= msg.keep_alive; self.state.version = msg.head.version; @@ -525,6 +535,14 @@ where debug_assert!(self.state.cached_headers.is_none()); debug_assert!(head.headers.is_empty()); self.state.cached_headers = Some(head.headers); + + #[cfg(feature = "ffi")] + { + self.state.on_informational = head + .extensions + .remove::(); + } + Some(encoder) } Err(err) => { @@ -775,6 +793,11 @@ struct State { preserve_header_case: bool, title_case_headers: bool, h09_responses: bool, + /// If set, called with each 1xx informational response received for + /// the current request. MUST be unset after a non-1xx response is + /// received. + #[cfg(feature = "ffi")] + on_informational: Option, #[cfg(feature = "ffi")] raw_headers: bool, /// Set to true when the Dispatcher should poll read operations diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index 1a72450b15..51fabc63ad 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -598,11 +598,7 @@ cfg_client! { match msg { Ok((msg, body)) => { if let Some(cb) = self.callback.take() { - let mut res = http::Response::new(body); - *res.status_mut() = msg.subject; - *res.headers_mut() = msg.headers; - *res.version_mut() = msg.version; - *res.extensions_mut() = msg.extensions; + let res = msg.into_response(body); cb.send(Ok(res)); Ok(()) } else { diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 4adc6c4419..2ff3d5a48a 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -168,6 +168,8 @@ where preserve_header_case: parse_ctx.preserve_header_case, h09_responses: parse_ctx.h09_responses, #[cfg(feature = "ffi")] + on_informational: parse_ctx.on_informational, + #[cfg(feature = "ffi")] raw_headers: parse_ctx.raw_headers, }, )? 
{ @@ -678,6 +680,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }; assert!(buffered diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs index b3f62f911b..758ac7b073 100644 --- a/src/proto/h1/mod.rs +++ b/src/proto/h1/mod.rs @@ -75,6 +75,8 @@ pub(crate) struct ParseContext<'a> { preserve_header_case: bool, h09_responses: bool, #[cfg(feature = "ffi")] + on_informational: &'a mut Option, + #[cfg(feature = "ffi")] raw_headers: bool, } diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 098febab3e..8828b94a0a 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -991,6 +991,13 @@ impl Http1Transaction for Client { })); } + #[cfg(feature = "ffi")] + if head.subject.is_informational() { + if let Some(callback) = ctx.on_informational { + callback.call(head.into_response(crate::Body::empty())); + } + } + // Parsing a 1xx response could have consumed the buffer, check if // it is empty now... if buf.is_empty() { @@ -1428,6 +1435,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }, ) @@ -1453,6 +1462,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); @@ -1473,6 +1484,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }; Server::parse(&mut raw, ctx).unwrap_err(); @@ -1491,6 +1504,8 @@ mod tests { preserve_header_case: false, h09_responses: true, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); @@ -1511,6 +1526,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }; Client::parse(&mut raw, ctx).unwrap_err(); @@ -1535,6 +1552,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }; let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); @@ -1556,6 +1575,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }; Client::parse(&mut raw, ctx).unwrap_err(); @@ -1572,6 +1593,8 @@ mod tests { preserve_header_case: true, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }; let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap(); @@ -1609,6 +1632,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }, ) @@ -1627,6 +1652,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }, ) @@ -1854,6 +1881,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, } ) @@ -1872,6 +1901,8 @@ mod tests { 
preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }, ) @@ -1890,6 +1921,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }, ) @@ -2383,6 +2416,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }, ) @@ -2465,6 +2500,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }, ) @@ -2503,6 +2540,8 @@ mod tests { preserve_header_case: false, h09_responses: false, #[cfg(feature = "ffi")] + on_informational: &mut None, + #[cfg(feature = "ffi")] raw_headers: false, }, ) diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 6f422078f6..513b70f86f 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -57,3 +57,14 @@ pub(crate) enum Dispatched { #[cfg(feature = "http1")] Upgrade(crate::upgrade::Pending), } + +impl MessageHead { + fn into_response(self, body: B) -> http::Response { + let mut res = http::Response::new(body); + *res.status_mut() = self.subject; + *res.headers_mut() = self.headers; + *res.version_mut() = self.version; + *res.extensions_mut() = self.extensions; + res + } +} From 5243570137ae49628cb387fff5611eea0add33bf Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Sat, 17 Jul 2021 00:48:45 +0200 Subject: [PATCH 089/420] fix(http2): preserve `proxy-authenticate` and `proxy-authorization` headers (#2597) That Proxy-Authenticate and Proxy-Authorization are forbidden over h2 is not actually specified anywhere, plus h2 also supports CONNECT requests, which are specifically made to do requests over a proxy, and those proxies may require authentication, sometimes through Proxy-Authorization. 
Note that there is an openwebdocs project that just started to clear up any MDN-induced confusion in implementations: https://github.com/openwebdocs/project/issues/43 --- src/proto/h2/mod.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index 4b66b1880f..ed8fbef74a 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -1,9 +1,6 @@ use bytes::{Buf, Bytes}; use h2::{RecvStream, SendStream}; -use http::header::{ - HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER, - TRANSFER_ENCODING, UPGRADE, -}; +use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE}; use http::HeaderMap; use pin_project_lite::pin_project; use std::error::Error as StdError; @@ -40,8 +37,6 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { let connection_headers = [ HeaderName::from_lowercase(b"keep-alive").unwrap(), HeaderName::from_lowercase(b"proxy-connection").unwrap(), - PROXY_AUTHENTICATE, - PROXY_AUTHORIZATION, TRAILER, TRANSFER_ENCODING, UPGRADE, From 52214f391c0a18dc66d1ccff9c0c004c5da85002 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 21 Jul 2021 02:06:09 +0200 Subject: [PATCH 090/420] fix(client): retry when pool checkout returns closed HTTP2 connection (#2585) When http2_only is true, we never try to open a new connection if there is one open already, which means that if the existing connection that gets checked out of the pool is closed, then the request won't happen. --- src/client/client.rs | 41 ++++++++++++++++++++++++++++++++++------- src/client/pool.rs | 23 ++++++++++++++++++----- src/error.rs | 2 +- 3 files changed, 53 insertions(+), 13 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index a5d8dcfaf7..f94d4154b8 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -11,7 +11,9 @@ use http::{Method, Request, Response, Uri, Version}; use super::conn; use super::connect::{self, sealed::Connect, Alpn, Connected, Connection}; -use super::pool::{self, Key as PoolKey, Pool, Poolable, Pooled, Reservation}; +use super::pool::{ + self, CheckoutIsClosedError, Key as PoolKey, Pool, Poolable, Pooled, Reservation, +}; #[cfg(feature = "tcp")] use super::HttpConnector; use crate::body::{Body, HttpBody}; @@ -223,7 +225,17 @@ where mut req: Request, pool_key: PoolKey, ) -> Result, ClientError> { - let mut pooled = self.connection_for(pool_key).await?; + let mut pooled = match self.connection_for(pool_key).await { + Ok(pooled) => pooled, + Err(ClientConnectError::Normal(err)) => return Err(ClientError::Normal(err)), + Err(ClientConnectError::H2CheckoutIsClosed(reason)) => { + return Err(ClientError::Canceled { + connection_reused: true, + req, + reason, + }) + } + }; if pooled.is_http1() { if req.version() == Version::HTTP_2 { @@ -321,7 +333,7 @@ where async fn connection_for( &self, pool_key: PoolKey, - ) -> Result>, ClientError> { + ) -> Result>, ClientConnectError> { // This actually races 2 different futures to try to get a ready // connection the fastest, and to reduce connection churn. // @@ -337,6 +349,7 @@ where // and then be inserted into the pool as an idle connection. let checkout = self.pool.checkout(pool_key.clone()); let connect = self.connect_to(pool_key); + let is_ver_h2 = self.config.ver == Ver::Http2; // The order of the `select` is depended on below... @@ -380,16 +393,25 @@ where // In both cases, we should just wait for the other future. 
Either::Left((Err(err), connecting)) => { if err.is_canceled() { - connecting.await.map_err(ClientError::Normal) + connecting.await.map_err(ClientConnectError::Normal) } else { - Err(ClientError::Normal(err)) + Err(ClientConnectError::Normal(err)) } } Either::Right((Err(err), checkout)) => { if err.is_canceled() { - checkout.await.map_err(ClientError::Normal) + checkout.await.map_err(move |err| { + if is_ver_h2 + && err.is_canceled() + && err.find_source::().is_some() + { + ClientConnectError::H2CheckoutIsClosed(err) + } else { + ClientConnectError::Normal(err) + } + }) } else { - Err(ClientError::Normal(err)) + Err(ClientConnectError::Normal(err)) } } } @@ -722,6 +744,11 @@ impl ClientError { } } +enum ClientConnectError { + Normal(crate::Error), + H2CheckoutIsClosed(crate::Error), +} + /// A marker to identify what version a pooled connection is. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub(super) enum Ver { diff --git a/src/client/pool.rs b/src/client/pool.rs index 94f73f6afd..9beca9f472 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -1,4 +1,5 @@ use std::collections::{HashMap, HashSet, VecDeque}; +use std::error::Error as StdError; use std::fmt; use std::ops::{Deref, DerefMut}; use std::sync::{Arc, Mutex, Weak}; @@ -560,28 +561,40 @@ pub(super) struct Checkout { waiter: Option>, } +#[derive(Debug)] +pub(super) struct CheckoutIsClosedError; + +impl StdError for CheckoutIsClosedError {} + +impl fmt::Display for CheckoutIsClosedError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("checked out connection was closed") + } +} + impl Checkout { fn poll_waiter( &mut self, cx: &mut task::Context<'_>, ) -> Poll>>> { - static CANCELED: &str = "pool checkout failed"; if let Some(mut rx) = self.waiter.take() { match Pin::new(&mut rx).poll(cx) { Poll::Ready(Ok(value)) => { if value.is_open() { Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value)))) } else { - Poll::Ready(Some(Err(crate::Error::new_canceled().with(CANCELED)))) + Poll::Ready(Some(Err( + crate::Error::new_canceled().with(CheckoutIsClosedError) + ))) } } Poll::Pending => { self.waiter = Some(rx); Poll::Pending } - Poll::Ready(Err(_canceled)) => { - Poll::Ready(Some(Err(crate::Error::new_canceled().with(CANCELED)))) - } + Poll::Ready(Err(_canceled)) => Poll::Ready(Some(Err( + crate::Error::new_canceled().with("request has been canceled") + ))), } } else { Poll::Ready(None) diff --git a/src/error.rs b/src/error.rs index 3eb6243701..cc601ef9d6 100644 --- a/src/error.rs +++ b/src/error.rs @@ -214,7 +214,7 @@ impl Error { &self.inner.kind } - fn find_source(&self) -> Option<&E> { + pub(crate) fn find_source(&self) -> Option<&E> { let mut cause = self.source(); while let Some(err) = cause { if let Some(ref typed) = err.downcast_ref() { From 090ee08b03266491944b136f9fcd96bfaf2015b4 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 20 Jul 2021 17:11:15 -0700 Subject: [PATCH 091/420] refactor(http1): reject newlines in chunked extensions We don't really care what bytes are in chunked extensions. We ignore them until we find a CRLF. However, some other HTTP implementations may only look for a LF, and forget that chunked requires the CR as well. To save them from themselves, this makes hyper reject any chunked extensions that include an LF byte. This isn't a *bug*. No one ever cares what's in the extensions. This is meant as a way to help implementations that don't decoded chunked encoding correctly. This shouldn't affect really anyone in the real world. 
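For illustration, a chunk-size line may carry extensions after a `;` and must be terminated by CRLF; the byte strings below are made up to show what the decoder accepts versus what it now rejects:

/* Accepted: the extension is ignored up to the terminating CRLF. */
static const char chunk_ok[] = "5;ext=value\r\nhello\r\n0\r\n\r\n";

/* Rejected after this change: the extension contains a bare LF, which the
   decoder now treats as invalid data instead of skipping. */
static const char chunk_rejected[] = "5;ext\nvalue\r\nhello\r\n0\r\n\r\n";
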
--- src/proto/h1/decode.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs index 5d1c1c3c1b..1e6027d906 100644 --- a/src/proto/h1/decode.rs +++ b/src/proto/h1/decode.rs @@ -268,8 +268,18 @@ impl ChunkedState { rdr: &mut R, ) -> Poll> { trace!("read_extension"); + // We don't care about extensions really at all. Just ignore them. + // They "end" at the next CRLF. + // + // However, some implementations may not check for the CR, so to save + // them from themselves, we reject extensions containing plain LF as + // well. match byte!(rdr, cx) { b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), + b'\n' => Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "invalid chunk extension contains newline", + ))), _ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions } } @@ -537,6 +547,7 @@ mod tests { read_err("1 invalid extension\r\n", InvalidInput).await; read_err("1 A\r\n", InvalidInput).await; read_err("1;no CRLF", UnexpectedEof).await; + read_err("1;reject\nnewlines\r\n", InvalidData).await; // Overflow read_err("f0000000000000003\r\n", InvalidData).await; } From f51c677dec9debf60cb336dc938bae103adf17a0 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 21 Jul 2021 17:17:05 +0200 Subject: [PATCH 092/420] fix(http2): improve I/O errors emitted by H2Upgraded (#2598) When a `CONNECT` over HTTP2 has been established, and the user tries to write data right when the peer closes the stream, it will no longer return as a "user error". The reset code is checked, and converted into an appropriate `io::ErrorKind`. --- src/proto/h2/mod.rs | 45 ++++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index ed8fbef74a..b8312aff64 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -1,5 +1,5 @@ use bytes::{Buf, Bytes}; -use h2::{RecvStream, SendStream}; +use h2::{Reason, RecvStream, SendStream}; use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE}; use http::HeaderMap; use pin_project_lite::pin_project; @@ -313,7 +313,11 @@ where break buf; } Some(Err(e)) => { - return Poll::Ready(Err(h2_to_io_error(e))); + return Poll::Ready(match e.reason() { + Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()), + Some(Reason::STREAM_CLOSED) => Err(io::ErrorKind::BrokenPipe.into()), + _ => Err(h2_to_io_error(e)), + }) } } }; @@ -335,21 +339,36 @@ where cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - if let Poll::Ready(reset) = self.send_stream.poll_reset(cx) { - return Poll::Ready(Err(h2_to_io_error(match reset { - Ok(reason) => reason.into(), - Err(e) => e, - }))); - } if buf.is_empty() { return Poll::Ready(Ok(0)); } self.send_stream.reserve_capacity(buf.len()); - Poll::Ready(match ready!(self.send_stream.poll_capacity(cx)) { - None => Ok(0), - Some(Ok(cnt)) => self.send_stream.write(&buf[..cnt], false).map(|()| cnt), - Some(Err(e)) => Err(h2_to_io_error(e)), - }) + + // We ignore all errors returned by `poll_capacity` and `write`, as we + // will get the correct from `poll_reset` anyway. 
+ let cnt = match ready!(self.send_stream.poll_capacity(cx)) { + None => Some(0), + Some(Ok(cnt)) => self + .send_stream + .write(&buf[..cnt], false) + .ok() + .map(|()| cnt), + Some(Err(_)) => None, + }; + + if let Some(cnt) = cnt { + return Poll::Ready(Ok(cnt)); + } + + Poll::Ready(Err(h2_to_io_error( + match ready!(self.send_stream.poll_reset(cx)) { + Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + Ok(reason) => reason.into(), + Err(e) => e, + }, + ))) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { From f70c8ffc7b949a0c9135bfc95caf676e4dedcd4e Mon Sep 17 00:00:00 2001 From: 0x79756b69 <76654022+0x79756b69@users.noreply.github.com> Date: Thu, 22 Jul 2021 01:09:38 +0900 Subject: [PATCH 093/420] docs(example): add a get query method to params example (#2601) --- examples/params.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/examples/params.rs b/examples/params.rs index 38c9bfabad..87c2368928 100644 --- a/examples/params.rs +++ b/examples/params.rs @@ -68,6 +68,29 @@ async fn param_example(req: Request) -> Result, hyper::Erro let body = format!("Hello {}, your number is {}", name, number); Ok(Response::new(body.into())) } + (&Method::GET, "/get") => { + let query = if let Some(q) = req.uri().query() { + q + } else { + return Ok(Response::builder() + .status(StatusCode::UNPROCESSABLE_ENTITY) + .body(MISSING.into()) + .unwrap()); + }; + let params = form_urlencoded::parse(query.as_bytes()) + .into_owned() + .collect::>(); + let page = if let Some(p) = params.get("page") { + p + } else { + return Ok(Response::builder() + .status(StatusCode::UNPROCESSABLE_ENTITY) + .body(MISSING.into()) + .unwrap()); + }; + let body = format!("You requested {}", page); + Ok(Response::new(body.into())) + } _ => Ok(Response::builder() .status(StatusCode::NOT_FOUND) .body(Body::empty()) From 0112d354263bb73ebefd1980c9581cdc921ebc79 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 21 Jul 2021 08:11:19 -0700 Subject: [PATCH 094/420] refactor(error): mark TransferEncodingInvalid variant only with server feature --- src/error.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/error.rs b/src/error.rs index cc601ef9d6..bd798667ee 100644 --- a/src/error.rs +++ b/src/error.rs @@ -84,7 +84,7 @@ pub(super) enum Header { Token, #[cfg(feature = "http1")] ContentLengthInvalid, - #[cfg(feature = "http1")] + #[cfg(all(feature = "http1", feature = "server"))] TransferEncodingInvalid, #[cfg(feature = "http1")] TransferEncodingUnexpected, @@ -391,7 +391,7 @@ impl Error { Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => { "invalid content-length parsed" } - #[cfg(feature = "http1")] + #[cfg(all(feature = "http1", feature = "server"))] Kind::Parse(Parse::Header(Header::TransferEncodingInvalid)) => { "invalid transfer-encoding parsed" } @@ -504,6 +504,7 @@ impl Parse { Parse::Header(Header::ContentLengthInvalid) } + #[cfg(all(feature = "http1", feature = "server"))] pub(crate) fn transfer_encoding_invalid() -> Self { Parse::Header(Header::TransferEncodingInvalid) } From 19f38b3e7febadedbfc558d17fa41baff73c6ecc Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 21 Jul 2021 15:06:07 -0700 Subject: [PATCH 095/420] v0.14.11 --- CHANGELOG.md | 16 ++++++++++++++++ Cargo.toml | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8aa87e008b..75b93a427e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-1,3 +1,19 @@ +### v0.14.11 (2021-07-21) + + +#### Bug Fixes + +* **client:** retry when pool checkout returns closed HTTP2 connection (#2585) ([52214f39](https://github.com/hyperium/hyper/commit/52214f391c0a18dc66d1ccff9c0c004c5da85002)) +* **http2:** + * improve I/O errors emitted by H2Upgraded (#2598) ([f51c677d](https://github.com/hyperium/hyper/commit/f51c677dec9debf60cb336dc938bae103adf17a0)) + * preserve `proxy-authenticate` and `proxy-authorization` headers (#2597) ([52435701](https://github.com/hyperium/hyper/commit/5243570137ae49628cb387fff5611eea0add33bf)) + + +#### Features + +* **ffi:** add hyper_request_on_informational ([25d18c0b](https://github.com/hyperium/hyper/commit/25d18c0b74ccf9e51f986daa3b2b98c0109f827a)) + + ### v0.14.10 (2021-07-07) diff --git a/Cargo.toml b/Cargo.toml index 1332aa448f..b8e00b3d6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.10" # don't forget to update html_root_url +version = "0.14.11" # don't forget to update html_root_url description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" From 6393a0cc031f504dbd50eb4b021bae51e21f6140 Mon Sep 17 00:00:00 2001 From: David Cook Date: Mon, 26 Jul 2021 13:18:50 -0500 Subject: [PATCH 096/420] docs(ffi): fix copy-paste error on hyper_waker_wake (#2604) --- capi/include/hyper.h | 4 +++- src/ffi/task.rs | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/capi/include/hyper.h b/capi/include/hyper.h index fc3b71e71d..d969f973fe 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -718,7 +718,9 @@ struct hyper_waker *hyper_context_waker(struct hyper_context *cx); void hyper_waker_free(struct hyper_waker *waker); /* - Free a waker that hasn't been woken. + Wake up the task associated with a waker. + + NOTE: This consumes the waker. You should not use or free the waker afterwards. */ void hyper_waker_wake(struct hyper_waker *waker); diff --git a/src/ffi/task.rs b/src/ffi/task.rs index e3fb5f44e8..f92798f0c8 100644 --- a/src/ffi/task.rs +++ b/src/ffi/task.rs @@ -418,7 +418,9 @@ ffi_fn! { } ffi_fn! { - /// Free a waker that hasn't been woken. + /// Wake up the task associated with a waker. + /// + /// NOTE: This consumes the waker. You should not use or free the waker afterwards. 
fn hyper_waker_wake(waker: *mut hyper_waker) { let waker = unsafe { Box::from_raw(waker) }; waker.waker.wake(); From 54b57c4797e1210924d901a665f9d17ae7dd9956 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 30 Jul 2021 15:07:11 -0700 Subject: [PATCH 097/420] chore(dependencies): remove tower-util dev-dependency (#2603) --- Cargo.toml | 3 +-- src/client/tests.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b8e00b3d6a..2cac5eabb0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,8 +66,7 @@ tokio = { version = "1", features = [ ] } tokio-test = "0.4" tokio-util = { version = "0.6", features = ["codec"] } -tower = { version = "0.4", features = ["make"] } -tower-util = "0.3" +tower = { version = "0.4", features = ["make", "util"] } url = "2.2" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies] diff --git a/src/client/tests.rs b/src/client/tests.rs index e955cb60c6..0a281a637d 100644 --- a/src/client/tests.rs +++ b/src/client/tests.rs @@ -7,7 +7,7 @@ use super::Client; #[tokio::test] async fn client_connect_uri_argument() { - let connector = tower_util::service_fn(|dst: http::Uri| { + let connector = tower::service_fn(|dst: http::Uri| { assert_eq!(dst.scheme(), Some(&http::uri::Scheme::HTTP)); assert_eq!(dst.host(), Some("example.local")); assert_eq!(dst.port(), None); From 9d253d31683e1c767f7e8c9a27f29f27f24c7fe5 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 4 Aug 2021 10:40:33 -0700 Subject: [PATCH 098/420] test(client): fix extra semi-colon warning in helper macro --- tests/client.rs | 4 ++-- tests/support/mod.rs | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/client.rs b/tests/client.rs index 3eb6dd9015..a2fdcc0cd9 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -344,11 +344,11 @@ macro_rules! __client_req_prop { } macro_rules! __client_req_header { - ($req_builder:ident, { $($name:expr => $val:expr,)* }) => { + ($req_builder:ident, { $($name:expr => $val:expr,)* }) => {{ $( $req_builder = $req_builder.header($name, $val); )* - } + }} } static REPLY_OK: &str = "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"; diff --git a/tests/support/mod.rs b/tests/support/mod.rs index 56ed839106..6b3c8f4472 100644 --- a/tests/support/mod.rs +++ b/tests/support/mod.rs @@ -177,7 +177,7 @@ macro_rules! __internal_eq_prop { (headers: $map:tt) => {{ #[allow(unused_mut)] { - let mut headers = Vec::new(); + let mut headers = Vec::>::new(); __internal_headers_eq!(headers, $map); headers } @@ -216,12 +216,12 @@ macro_rules! __internal_headers_eq { } }) as std::sync::Arc }; - (@val $name: expr, NONE) => { + (@val $name: expr, NONE) => {{ __internal_headers_eq!(@pat $name, None); - }; - (@val $name: expr, SOME) => { - __internal_headers_eq!(@pat $name, Some(_)); - }; + }}; + (@val $name: expr, SOME) => {{ + __internal_headers_eq!(@pat $name, Some(_)) + }}; (@val $name: expr, $val:expr) => ({ let __val = Option::from($val); std::sync::Arc::new(move |__hdrs: &hyper::HeaderMap| { @@ -232,11 +232,11 @@ macro_rules! 
__internal_headers_eq { } }) as std::sync::Arc }); - ($headers:ident, { $($name:expr => $val:tt,)* }) => { + ($headers:ident, { $($name:expr => $val:tt,)* }) => {{ $( $headers.push(__internal_headers_eq!(@val $name, $val)); )* - } + }} } #[derive(Clone, Debug)] From 91bbce4ed3bb8379bf4f17e30c8df896c32aa731 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 4 Aug 2021 10:57:23 -0700 Subject: [PATCH 099/420] chore(ffi): update gen_header script to use unpretty option --- capi/gen_header.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/capi/gen_header.sh b/capi/gen_header.sh index a4460c6295..d0b9c13a32 100755 --- a/capi/gen_header.sh +++ b/capi/gen_header.sh @@ -71,7 +71,7 @@ cp "$CAPI_DIR/include/hyper.h" "$header_file_backup" cd "${WORK_DIR}" || exit 2 # Expand just the ffi module -if ! output=$(RUSTFLAGS='--cfg hyper_unstable_ffi' cargo rustc -- -Z unstable-options --pretty=expanded 2>&1 > expanded.rs); then +if ! output=$(RUSTFLAGS='--cfg hyper_unstable_ffi' cargo rustc -- -Z unpretty=expanded 2>&1 > expanded.rs); then # As of April 2021 the script above prints a lot of warnings/errors, and # exits with a nonzero return code, but hyper.h still gets generated. # From 73bff4e98c372ce04b006370c0b0d2af29ea8718 Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Wed, 4 Aug 2021 13:52:35 -0700 Subject: [PATCH 100/420] feat(client): expose http09 and http1 options on `client::conn::Builder` (#2611) These options are currently available on the high-level builder only. Along the way, rename the setters to follow the public API conventions and add docs. Closes #2461 --- src/client/client.rs | 12 ++++----- src/client/conn.rs | 61 +++++++++++++++++++++++++++++++++++++++----- src/ffi/client.rs | 2 +- 3 files changed, 62 insertions(+), 13 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index f94d4154b8..e1d5914aba 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -971,7 +971,7 @@ impl Builder { /// /// Default is an adaptive read buffer. pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { - self.conn_builder.h1_read_buf_exact_size(Some(sz)); + self.conn_builder.http1_read_buf_exact_size(Some(sz)); self } @@ -987,7 +987,7 @@ impl Builder { #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { - self.conn_builder.h1_max_buf_size(max); + self.conn_builder.http1_max_buf_size(max); self } @@ -1012,7 +1012,7 @@ impl Builder { /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self { self.conn_builder - .h1_allow_spaces_after_header_name_in_responses(val); + .http1_allow_spaces_after_header_name_in_responses(val); self } @@ -1023,7 +1023,7 @@ impl Builder { /// /// Default is false. pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { - self.conn_builder.h1_title_case_headers(val); + self.conn_builder.http1_title_case_headers(val); self } @@ -1034,7 +1034,7 @@ impl Builder { /// /// Default is false. pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { - self.conn_builder.h1_preserve_header_case(val); + self.conn_builder.http1_preserve_header_case(val); self } @@ -1042,7 +1042,7 @@ impl Builder { /// /// Default is false. 
pub fn http09_responses(&mut self, val: bool) -> &mut Self { - self.conn_builder.h09_responses(val); + self.conn_builder.http09_responses(val); self } diff --git a/src/client/conn.rs b/src/client/conn.rs index c557ee29c2..c40452038e 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -550,12 +550,34 @@ impl Builder { self } - pub(super) fn h09_responses(&mut self, enabled: bool) -> &mut Builder { + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder { self.h09_responses = enabled; self } - pub(crate) fn h1_allow_spaces_after_header_name_in_responses( + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn http1_allow_spaces_after_header_name_in_responses( &mut self, enabled: bool, ) -> &mut Builder { @@ -564,24 +586,51 @@ impl Builder { self } - pub(super) fn h1_title_case_headers(&mut self, enabled: bool) -> &mut Builder { + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Builder { self.h1_title_case_headers = enabled; self } - pub(crate) fn h1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder { + /// Set whether HTTP/1 connections will write header names as provided + /// at the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder { self.h1_preserve_header_case = enabled; self } - pub(super) fn h1_read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `http1_max_buf_size` option. + /// + /// Default is an adaptive read buffer. + pub fn http1_read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { self.h1_read_buf_exact_size = sz; self.h1_max_buf_size = None; self } + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `http1_read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. 
#[cfg(feature = "http1")] - pub(super) fn h1_max_buf_size(&mut self, max: usize) -> &mut Self { + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { assert!( max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, "the max_buf_size cannot be smaller than the minimum that h1 specifies." diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 6fa8862ddd..2e73e6695a 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -107,7 +107,7 @@ ffi_fn! { /// Creates a new set of HTTP clientconn options to be used in a handshake. fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { let mut builder = conn::Builder::new(); - builder.h1_preserve_header_case(true); + builder.http1_preserve_header_case(true); Box::into_raw(Box::new(hyper_clientconn_options { builder, From 684f2fa76d44fa2b1b063ad0443a1b0d16dfad0e Mon Sep 17 00:00:00 2001 From: Jan Verbeek Date: Fri, 6 Aug 2021 02:00:57 +0200 Subject: [PATCH 101/420] fix(http1): apply header title case for consecutive dashes (#2613) Fix the header title-casing to work with consecutive dashes. Previously with two dashes in a row the first dash would uppercase the second dash which would then not count, so `weird--header` would be cased as `Weird--header` instead of `Weird--Header`. --- src/proto/h1/role.rs | 38 +++++++++++--------------------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 8828b94a0a..5e8bd43a07 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -1308,36 +1308,18 @@ fn record_header_indices( Ok(()) } -// Write header names as title case. The header name is assumed to be ASCII, -// therefore it is trivial to convert an ASCII character from lowercase to -// uppercase. It is as simple as XORing the lowercase character byte with -// space. +// Write header names as title case. The header name is assumed to be ASCII. 
fn title_case(dst: &mut Vec, name: &[u8]) { dst.reserve(name.len()); - let mut iter = name.iter(); - - // Uppercase the first character - if let Some(c) = iter.next() { - if *c >= b'a' && *c <= b'z' { - dst.push(*c ^ b' '); - } else { - dst.push(*c); - } - } - - while let Some(c) = iter.next() { - dst.push(*c); - - if *c == b'-' { - if let Some(c) = iter.next() { - if *c >= b'a' && *c <= b'z' { - dst.push(*c ^ b' '); - } else { - dst.push(*c); - } - } + // Ensure first character is uppercased + let mut prev = b'-'; + for &(mut c) in name { + if prev == b'-' { + c.make_ascii_uppercase(); } + dst.push(c); + prev = c; } } @@ -2316,6 +2298,8 @@ mod tests { .insert("content-length", HeaderValue::from_static("10")); head.headers .insert("content-type", HeaderValue::from_static("application/json")); + head.headers + .insert("weird--header", HeaderValue::from_static("")); let mut vec = Vec::new(); Server::encode( @@ -2331,7 +2315,7 @@ mod tests { .unwrap(); let expected_response = - b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: application/json\r\n"; + b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: application/json\r\nWeird--Header: \r\n"; assert_eq!(&vec[..expected_response.len()], &expected_response[..]); } From c35153998e96870ed4fed2e2afe538e984cdfad2 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 18 Aug 2021 10:29:20 -0700 Subject: [PATCH 102/420] docs(capi): output the hyper_version in the capi examples (#2623) --- capi/examples/client.c | 2 +- capi/examples/upload.c | 14 +++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/capi/examples/client.c b/capi/examples/client.c index 0a1f71240c..3cccdd6ae1 100644 --- a/capi/examples/client.c +++ b/capi/examples/client.c @@ -175,7 +175,7 @@ int main(int argc, char *argv[]) { hyper_io_set_read(io, read_cb); hyper_io_set_write(io, write_cb); - printf("http handshake ...\n"); + printf("http handshake (hyper v%s) ...\n", hyper_version()); // We need an executor generally to poll futures const hyper_executor *exec = hyper_executor_new(); diff --git a/capi/examples/upload.c b/capi/examples/upload.c index caf56aafd3..fa7134f369 100644 --- a/capi/examples/upload.c +++ b/capi/examples/upload.c @@ -148,14 +148,13 @@ static int print_each_header(void *userdata, return HYPER_ITER_CONTINUE; } -static void print_informational(void *userdata, hyper_response *resp) { +static void print_informational(void *userdata, const hyper_response *resp) { uint16_t http_status = hyper_response_status(resp); printf("\nInformational (1xx): %d\n", http_status); - hyper_headers *headers = hyper_response_headers(resp); - hyper_headers_foreach(headers, print_each_header, NULL); - printf("\n"); + const hyper_buf* headers = hyper_response_headers_raw(resp); + write(1, hyper_buf_bytes(headers), hyper_buf_len(headers)); } typedef enum { @@ -218,7 +217,7 @@ int main(int argc, char *argv[]) { hyper_io_set_read(io, read_cb); hyper_io_set_write(io, write_cb); - printf("http handshake ...\n"); + printf("http handshake (hyper v%s) ...\n", hyper_version()); // We need an executor generally to poll futures const hyper_executor *exec = hyper_executor_new(); @@ -226,6 +225,7 @@ int main(int argc, char *argv[]) { // Prepare client options hyper_clientconn_options *opts = hyper_clientconn_options_new(); hyper_clientconn_options_exec(opts, exec); + hyper_clientconn_options_headers_raw(opts, 1); hyper_task *handshake = hyper_clientconn_handshake(io, opts); hyper_task_set_userdata(handshake, (void *)EXAMPLE_HANDSHAKE); @@ -275,6 +275,10 @@ int 
main(int argc, char *argv[]) { hyper_headers_set(req_headers, STR_ARG("host"), STR_ARG(host)); hyper_headers_set(req_headers, STR_ARG("expect"), STR_ARG("100-continue")); + // NOTE: We aren't handling *waiting* for the 100 Continue, + // the body is sent immediately. This will just print if any + // informational headers are received. + printf(" with expect-continue ...\n"); hyper_request_on_informational(req, print_informational, NULL); // Prepare the req body From 3b265728769a1199d6b4b7f66a3645064bf1f885 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 18 Aug 2021 14:15:14 -0700 Subject: [PATCH 103/420] refactor(ffi): check pointer arguments for NULL (#2624) This changes all the extern C functions in `hyper::ffi` to check passed pointer arguments for being `NULL` before trying to use them. Before, we would just assume the programmer had passed a good pointer, which could result in segmentation faults. Now: - In debug builds, it will assert they aren't null, and so if they are, a message identifying the argument name will be printed and then the process will crash. - In release builds, it will still check for null, but if found, it will return early, with a return value indicating failure if the return type allows (such as returning NULL, or `HYPERE_INVALID_ARG`). Closes #2620 --- capi/examples/upload.c | 6 ++++-- src/ffi/body.rs | 18 +++++------------- src/ffi/client.rs | 35 +++++++++++----------------------- src/ffi/error.rs | 6 +++--- src/ffi/http_types.rs | 40 ++++++++++++++++++++++----------------- src/ffi/io.rs | 8 ++++---- src/ffi/macros.rs | 22 +++++++++++++++++++++ src/ffi/task.rs | 43 +++++++++++++----------------------------- 8 files changed, 85 insertions(+), 93 deletions(-) diff --git a/capi/examples/upload.c b/capi/examples/upload.c index fa7134f369..5492944241 100644 --- a/capi/examples/upload.c +++ b/capi/examples/upload.c @@ -153,8 +153,10 @@ static void print_informational(void *userdata, const hyper_response *resp) { printf("\nInformational (1xx): %d\n", http_status); - const hyper_buf* headers = hyper_response_headers_raw(resp); - write(1, hyper_buf_bytes(headers), hyper_buf_len(headers)); + const hyper_buf *headers = hyper_response_headers_raw(resp); + if (headers) { + write(1, hyper_buf_bytes(headers), hyper_buf_len(headers)); + } } typedef enum { diff --git a/src/ffi/body.rs b/src/ffi/body.rs index d6e1394371..932200b54d 100644 --- a/src/ffi/body.rs +++ b/src/ffi/body.rs @@ -40,11 +40,7 @@ ffi_fn! { ffi_fn! { /// Free a `hyper_body *`. fn hyper_body_free(body: *mut hyper_body) { - if body.is_null() { - return; - } - - drop(unsafe { Box::from_raw(body) }); + drop(non_null!(Box::from_raw(body) ?= ())); } } @@ -61,7 +57,7 @@ ffi_fn! { /// However, it MUST NOT be used or freed until the related task completes. fn hyper_body_data(body: *mut hyper_body) -> *mut hyper_task { // This doesn't take ownership of the Body, so don't allow destructor - let mut body = ManuallyDrop::new(unsafe { Box::from_raw(body) }); + let mut body = ManuallyDrop::new(non_null!(Box::from_raw(body) ?= ptr::null_mut())); Box::into_raw(hyper_task::boxed(async move { body.0.data().await.map(|res| res.map(hyper_buf)) @@ -81,11 +77,7 @@ ffi_fn! { /// /// This will consume the `hyper_body *`, you shouldn't use it anymore or free it. 
fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut hyper_task { - if body.is_null() { - return ptr::null_mut(); - } - - let mut body = unsafe { Box::from_raw(body) }; + let mut body = non_null!(Box::from_raw(body) ?= ptr::null_mut()); let userdata = UserDataPointer(userdata); Box::into_raw(hyper_task::boxed(async move { @@ -103,7 +95,7 @@ ffi_fn! { ffi_fn! { /// Set userdata on this body, which will be passed to callback functions. fn hyper_body_set_userdata(body: *mut hyper_body, userdata: *mut c_void) { - let b = unsafe { &mut *body }; + let b = non_null!(&mut *body ?= ()); b.0.as_ffi_mut().userdata = userdata; } } @@ -129,7 +121,7 @@ ffi_fn! { /// If some error has occurred, you can return `HYPER_POLL_ERROR` to abort /// the body. fn hyper_body_set_data_func(body: *mut hyper_body, func: hyper_body_data_callback) { - let b = unsafe { &mut *body }; + let b = non_null!{ &mut *body ?= () }; b.0.as_ffi_mut().data_func = func; } } diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 2e73e6695a..1e5f29d548 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -1,3 +1,4 @@ +use std::ptr; use std::sync::Arc; use libc::c_int; @@ -37,15 +38,8 @@ ffi_fn! { /// The returned `hyper_task *` must be polled with an executor until the /// handshake completes, at which point the value can be taken. fn hyper_clientconn_handshake(io: *mut hyper_io, options: *mut hyper_clientconn_options) -> *mut hyper_task { - if io.is_null() { - return std::ptr::null_mut(); - } - if options.is_null() { - return std::ptr::null_mut(); - } - - let options = unsafe { Box::from_raw(options) }; - let io = unsafe { Box::from_raw(io) }; + let options = non_null! { Box::from_raw(options) ?= ptr::null_mut() }; + let io = non_null! { Box::from_raw(io) ?= ptr::null_mut() }; Box::into_raw(hyper_task::boxed(async move { options.builder.handshake::<_, crate::Body>(io) @@ -66,19 +60,12 @@ ffi_fn! { /// Returns a task that needs to be polled until it is ready. When ready, the /// task yields a `hyper_response *`. fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut hyper_task { - if conn.is_null() { - return std::ptr::null_mut(); - } - if req.is_null() { - return std::ptr::null_mut(); - } - - let mut req = unsafe { Box::from_raw(req) }; + let mut req = non_null! { Box::from_raw(req) ?= ptr::null_mut() }; // Update request with original-case map of headers req.finalize_request(); - let fut = unsafe { &mut *conn }.tx.send_request(req.0); + let fut = non_null! { &mut *conn ?= ptr::null_mut() }.tx.send_request(req.0); let fut = async move { fut.await.map(hyper_response::wrap) @@ -91,7 +78,7 @@ ffi_fn! { ffi_fn! { /// Free a `hyper_clientconn *`. fn hyper_clientconn_free(conn: *mut hyper_clientconn) { - drop(unsafe { Box::from_raw(conn) }); + drop(non_null! { Box::from_raw(conn) ?= () }); } } @@ -119,7 +106,7 @@ ffi_fn! { ffi_fn! { /// Free a `hyper_clientconn_options *`. fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) { - drop(unsafe { Box::from_raw(opts) }); + drop(non_null! { Box::from_raw(opts) ?= () }); } } @@ -128,9 +115,9 @@ ffi_fn! { /// /// This does not consume the `options` or the `exec`. fn hyper_clientconn_options_exec(opts: *mut hyper_clientconn_options, exec: *const hyper_executor) { - let opts = unsafe { &mut *opts }; + let opts = non_null! { &mut *opts ?= () }; - let exec = unsafe { Arc::from_raw(exec) }; + let exec = non_null! 
{ Arc::from_raw(exec) ?= () }; let weak_exec = hyper_executor::downgrade(&exec); std::mem::forget(exec); @@ -146,7 +133,7 @@ ffi_fn! { fn hyper_clientconn_options_http2(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { #[cfg(feature = "http2")] { - let opts = unsafe { &mut *opts }; + let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; opts.builder.http2_only(enabled != 0); hyper_code::HYPERE_OK } @@ -168,7 +155,7 @@ ffi_fn! { /// /// If enabled, see `hyper_response_headers_raw()` for usage. fn hyper_clientconn_options_headers_raw(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { - let opts = unsafe { &mut *opts }; + let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; opts.builder.http1_headers_raw(enabled != 0); hyper_code::HYPERE_OK } diff --git a/src/ffi/error.rs b/src/ffi/error.rs index 7b85407099..015e595aee 100644 --- a/src/ffi/error.rs +++ b/src/ffi/error.rs @@ -58,14 +58,14 @@ impl hyper_error { ffi_fn! { /// Frees a `hyper_error`. fn hyper_error_free(err: *mut hyper_error) { - drop(unsafe { Box::from_raw(err) }); + drop(non_null!(Box::from_raw(err) ?= ())); } } ffi_fn! { /// Get an equivalent `hyper_code` from this error. fn hyper_error_code(err: *const hyper_error) -> hyper_code { - unsafe { &*err }.code() + non_null!(&*err ?= hyper_code::HYPERE_INVALID_ARG).code() } } @@ -80,6 +80,6 @@ ffi_fn! { let dst = unsafe { std::slice::from_raw_parts_mut(dst, dst_len) }; - unsafe { &*err }.print_to(dst) + non_null!(&*err ?= 0).print_to(dst) } } diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index e26557cebf..f60e60bc8a 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -48,7 +48,7 @@ ffi_fn! { ffi_fn! { /// Free an HTTP request if not going to send it on a client. fn hyper_request_free(req: *mut hyper_request) { - drop(unsafe { Box::from_raw(req) }); + drop(non_null!(Box::from_raw(req) ?= ())); } } @@ -58,9 +58,10 @@ ffi_fn! { let bytes = unsafe { std::slice::from_raw_parts(method, method_len as usize) }; + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); match Method::from_bytes(bytes) { Ok(m) => { - *unsafe { &mut *req }.0.method_mut() = m; + *req.0.method_mut() = m; hyper_code::HYPERE_OK }, Err(_) => { @@ -76,9 +77,10 @@ ffi_fn! { let bytes = unsafe { std::slice::from_raw_parts(uri, uri_len as usize) }; + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); match Uri::from_maybe_shared(bytes) { Ok(u) => { - *unsafe { &mut *req }.0.uri_mut() = u; + *req.0.uri_mut() = u; hyper_code::HYPERE_OK }, Err(_) => { @@ -98,7 +100,8 @@ ffi_fn! { fn hyper_request_set_version(req: *mut hyper_request, version: c_int) -> hyper_code { use http::Version; - *unsafe { &mut *req }.0.version_mut() = match version { + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + *req.0.version_mut() = match version { super::HYPER_HTTP_VERSION_NONE => Version::HTTP_11, super::HYPER_HTTP_VERSION_1_0 => Version::HTTP_10, super::HYPER_HTTP_VERSION_1_1 => Version::HTTP_11, @@ -130,8 +133,9 @@ ffi_fn! { /// This takes ownership of the `hyper_body *`, you must not use it or /// free it after setting it on the request. 
fn hyper_request_set_body(req: *mut hyper_request, body: *mut hyper_body) -> hyper_code { - let body = unsafe { Box::from_raw(body) }; - *unsafe { &mut *req }.0.body_mut() = body.0; + let body = non_null!(Box::from_raw(body) ?= hyper_code::HYPERE_INVALID_ARG); + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + *req.0.body_mut() = body.0; hyper_code::HYPERE_OK } } @@ -157,7 +161,8 @@ ffi_fn! { func: callback, data: UserDataPointer(data), }; - unsafe { &mut *req }.0.extensions_mut().insert(ext); + let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); + req.0.extensions_mut().insert(ext); hyper_code::HYPERE_OK } } @@ -176,7 +181,7 @@ impl hyper_request { ffi_fn! { /// Free an HTTP response after using it. fn hyper_response_free(resp: *mut hyper_response) { - drop(unsafe { Box::from_raw(resp) }); + drop(non_null!(Box::from_raw(resp) ?= ())); } } @@ -185,7 +190,7 @@ ffi_fn! { /// /// It will always be within the range of 100-599. fn hyper_response_status(resp: *const hyper_response) -> u16 { - unsafe { &*resp }.0.status().as_u16() + non_null!(&*resp ?= 0).0.status().as_u16() } } @@ -200,7 +205,7 @@ ffi_fn! { /// Use `hyper_response_reason_phrase_len()` to get the length of this /// buffer. fn hyper_response_reason_phrase(resp: *const hyper_response) -> *const u8 { - unsafe { &*resp }.reason_phrase().as_ptr() + non_null!(&*resp ?= std::ptr::null()).reason_phrase().as_ptr() } ?= std::ptr::null() } @@ -209,7 +214,7 @@ ffi_fn! { /// /// Use `hyper_response_reason_phrase()` to get the buffer pointer. fn hyper_response_reason_phrase_len(resp: *const hyper_response) -> size_t { - unsafe { &*resp }.reason_phrase().len() + non_null!(&*resp ?= 0).reason_phrase().len() } } @@ -226,7 +231,8 @@ ffi_fn! { /// The buffer is not null-terminated, see the `hyper_buf` functions for /// getting the bytes and length. fn hyper_response_headers_raw(resp: *const hyper_response) -> *const hyper_buf { - match unsafe { &*resp }.0.extensions().get::() { + let resp = non_null!(&*resp ?= std::ptr::null()); + match resp.0.extensions().get::() { Some(raw) => &raw.0, None => std::ptr::null(), } @@ -245,7 +251,7 @@ ffi_fn! { fn hyper_response_version(resp: *const hyper_response) -> c_int { use http::Version; - match unsafe { &*resp }.0.version() { + match non_null!(&*resp ?= 0).0.version() { Version::HTTP_10 => super::HYPER_HTTP_VERSION_1_0, Version::HTTP_11 => super::HYPER_HTTP_VERSION_1_1, Version::HTTP_2 => super::HYPER_HTTP_VERSION_2, @@ -269,7 +275,7 @@ ffi_fn! { /// /// It is safe to free the response even after taking ownership of its body. fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body { - let body = std::mem::take(unsafe { &mut *resp }.0.body_mut()); + let body = std::mem::take(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut()); Box::into_raw(Box::new(hyper_body(body))) } ?= std::ptr::null_mut() } @@ -331,7 +337,7 @@ ffi_fn! { /// The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or /// `HYPER_ITER_BREAK` to stop. fn hyper_headers_foreach(headers: *const hyper_headers, func: hyper_headers_foreach_callback, userdata: *mut c_void) { - let headers = unsafe { &*headers }; + let headers = non_null!(&*headers ?= ()); // For each header name/value pair, there may be a value in the casemap // that corresponds to the HeaderValue. So, we iterator all the keys, // and for each one, try to pair the originally cased name with the value. @@ -366,7 +372,7 @@ ffi_fn! { /// /// This overwrites any previous value set for the header. 
fn hyper_headers_set(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { - let headers = unsafe { &mut *headers }; + let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG); match unsafe { raw_name_value(name, name_len, value, value_len) } { Ok((name, value, orig_name)) => { headers.headers.insert(&name, value); @@ -384,7 +390,7 @@ ffi_fn! { /// If there were already existing values for the name, this will append the /// new value to the internal list. fn hyper_headers_add(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { - let headers = unsafe { &mut *headers }; + let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG); match unsafe { raw_name_value(name, name_len, value, value_len) } { Ok((name, value, orig_name)) => { diff --git a/src/ffi/io.rs b/src/ffi/io.rs index 7fb4538815..bff666dbcf 100644 --- a/src/ffi/io.rs +++ b/src/ffi/io.rs @@ -46,7 +46,7 @@ ffi_fn! { /// This is typically only useful if you aren't going to pass ownership /// of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. fn hyper_io_free(io: *mut hyper_io) { - drop(unsafe { Box::from_raw(io) }); + drop(non_null!(Box::from_raw(io) ?= ())); } } @@ -55,7 +55,7 @@ ffi_fn! { /// /// This value is passed as an argument to the read and write callbacks. fn hyper_io_set_userdata(io: *mut hyper_io, data: *mut c_void) { - unsafe { &mut *io }.userdata = data; + non_null!(&mut *io ?= ()).userdata = data; } } @@ -77,7 +77,7 @@ ffi_fn! { /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` /// should be the return value. fn hyper_io_set_read(io: *mut hyper_io, func: hyper_io_read_callback) { - unsafe { &mut *io }.read = func; + non_null!(&mut *io ?= ()).read = func; } } @@ -96,7 +96,7 @@ ffi_fn! { /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` /// should be the return value. fn hyper_io_set_write(io: *mut hyper_io, func: hyper_io_write_callback) { - unsafe { &mut *io }.write = func; + non_null!(&mut *io ?= ()).write = func; } } diff --git a/src/ffi/macros.rs b/src/ffi/macros.rs index 12064d41a2..022711baaa 100644 --- a/src/ffi/macros.rs +++ b/src/ffi/macros.rs @@ -29,3 +29,25 @@ macro_rules! ffi_fn { ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body); }; } + +macro_rules! non_null { + ($ptr:ident, $eval:expr, $err:expr) => {{ + debug_assert!(!$ptr.is_null(), "{:?} must not be null", stringify!($ptr)); + if $ptr.is_null() { + return $err; + } + unsafe { $eval } + }}; + (&*$ptr:ident ?= $err:expr) => {{ + non_null!($ptr, &*$ptr, $err) + }}; + (&mut *$ptr:ident ?= $err:expr) => {{ + non_null!($ptr, &mut *$ptr, $err) + }}; + (Box::from_raw($ptr:ident) ?= $err:expr) => {{ + non_null!($ptr, Box::from_raw($ptr), $err) + }}; + (Arc::from_raw($ptr:ident) ?= $err:expr) => {{ + non_null!($ptr, Arc::from_raw($ptr), $err) + }}; +} diff --git a/src/ffi/task.rs b/src/ffi/task.rs index f92798f0c8..e951e0dacc 100644 --- a/src/ffi/task.rs +++ b/src/ffi/task.rs @@ -195,7 +195,7 @@ ffi_fn! { ffi_fn! { /// Frees an executor and any incomplete tasks still part of it. fn hyper_executor_free(exec: *const hyper_executor) { - drop(unsafe { Arc::from_raw(exec) }); + drop(non_null!(Arc::from_raw(exec) ?= ())); } } @@ -205,11 +205,8 @@ ffi_fn! { /// The executor takes ownership of the task, it should not be accessed /// again unless returned back to the user with `hyper_executor_poll`. 
fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code { - if exec.is_null() || task.is_null() { - return hyper_code::HYPERE_INVALID_ARG; - } - let exec = unsafe { &*exec }; - let task = unsafe { Box::from_raw(task) }; + let exec = non_null!(&*exec ?= hyper_code::HYPERE_INVALID_ARG); + let task = non_null!(Box::from_raw(task) ?= hyper_code::HYPERE_INVALID_ARG); exec.spawn(task); hyper_code::HYPERE_OK } @@ -223,9 +220,7 @@ ffi_fn! { /// /// If there are no ready tasks, this returns `NULL`. fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task { - // We only want an `&Arc` in here, so wrap in a `ManuallyDrop` so we - // don't accidentally trigger a ref_dec of the Arc. - let exec = unsafe { &*exec }; + let exec = non_null!(&*exec ?= ptr::null_mut()); match exec.poll_next() { Some(task) => Box::into_raw(task), None => ptr::null_mut(), @@ -274,7 +269,7 @@ impl Future for TaskFuture { ffi_fn! { /// Free a task. fn hyper_task_free(task: *mut hyper_task) { - drop(unsafe { Box::from_raw(task) }); + drop(non_null!(Box::from_raw(task) ?= ())); } } @@ -286,11 +281,7 @@ ffi_fn! { /// /// Use `hyper_task_type` to determine the type of the `void *` return value. fn hyper_task_value(task: *mut hyper_task) -> *mut c_void { - if task.is_null() { - return ptr::null_mut(); - } - - let task = unsafe { &mut *task }; + let task = non_null!(&mut *task ?= ptr::null_mut()); if let Some(val) = task.output.take() { let p = Box::into_raw(val) as *mut c_void; @@ -309,13 +300,9 @@ ffi_fn! { ffi_fn! { /// Query the return type of this task. fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type { - if task.is_null() { - // instead of blowing up spectacularly, just say this null task - // doesn't have a value to retrieve. - return hyper_task_return_type::HYPER_TASK_EMPTY; - } - - unsafe { &*task }.output_type() + // instead of blowing up spectacularly, just say this null task + // doesn't have a value to retrieve. + non_null!(&*task ?= hyper_task_return_type::HYPER_TASK_EMPTY).output_type() } } @@ -336,11 +323,7 @@ ffi_fn! { ffi_fn! { /// Retrieve the userdata that has been set via `hyper_task_set_userdata`. fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void { - if task.is_null() { - return ptr::null_mut(); - } - - unsafe { &*task }.userdata.0 + non_null!(&*task ?= ptr::null_mut()).userdata.0 } ?= ptr::null_mut() } @@ -403,7 +386,7 @@ impl hyper_context<'_> { ffi_fn! { /// Copies a waker out of the task context. fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker { - let waker = unsafe { &mut *cx }.0.waker().clone(); + let waker = non_null!(&mut *cx ?= ptr::null_mut()).0.waker().clone(); Box::into_raw(Box::new(hyper_waker { waker })) } ?= ptr::null_mut() } @@ -413,7 +396,7 @@ ffi_fn! { ffi_fn! { /// Free a waker that hasn't been woken. fn hyper_waker_free(waker: *mut hyper_waker) { - drop(unsafe { Box::from_raw(waker) }); + drop(non_null!(Box::from_raw(waker) ?= ())); } } @@ -422,7 +405,7 @@ ffi_fn! { /// /// NOTE: This consumes the waker. You should not use or free the waker afterwards. 
fn hyper_waker_wake(waker: *mut hyper_waker) { - let waker = unsafe { Box::from_raw(waker) }; + let waker = non_null!(Box::from_raw(waker) ?= ()); waker.waker.wake(); } } From a1995ee1cb7aa1306f6c5cbf3f50eafac49776c4 Mon Sep 17 00:00:00 2001 From: Daniel Stenberg Date: Wed, 18 Aug 2021 23:16:07 +0200 Subject: [PATCH 104/420] docs(capi): build 'upload' example too by default (#2625) --- capi/examples/Makefile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/capi/examples/Makefile b/capi/examples/Makefile index 6cc0a69575..2bd5c44982 100644 --- a/capi/examples/Makefile +++ b/capi/examples/Makefile @@ -3,20 +3,23 @@ # TARGET = client +TARGET2 = upload OBJS = client.o +OBJS2 = upload.o RPATH=$(PWD)/../../target/debug CFLAGS = -I../include LDFLAGS = -L$(RPATH) -Wl,-rpath,$(RPATH) LIBS = -lhyper +all: $(TARGET) $(TARGET2) + $(TARGET): $(OBJS) $(CC) -o $(TARGET) $(OBJS) $(LDFLAGS) $(LIBS) -upload: upload.o - $(CC) -o upload upload.o $(LDFLAGS) $(LIBS) +$(TARGET2): $(OBJS2) + $(CC) -o $(TARGET2) $(OBJS) $(LDFLAGS) $(LIBS) clean: - rm -f $(OBJS) $(TARGET) - rm -f upload upload.o + rm -f $(OBJS) $(TARGET) $(OBJS2) $(TARGET2) From 9a113ed4169a530b94fbd99f1e0f4b7902fb655c Mon Sep 17 00:00:00 2001 From: Daniel Stenberg Date: Thu, 19 Aug 2021 17:50:21 +0200 Subject: [PATCH 105/420] docs(capi): fix typo in the upload example build (#2626) --- capi/examples/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/capi/examples/Makefile b/capi/examples/Makefile index 2bd5c44982..951c99fe62 100644 --- a/capi/examples/Makefile +++ b/capi/examples/Makefile @@ -19,7 +19,7 @@ $(TARGET): $(OBJS) $(CC) -o $(TARGET) $(OBJS) $(LDFLAGS) $(LIBS) $(TARGET2): $(OBJS2) - $(CC) -o $(TARGET2) $(OBJS) $(LDFLAGS) $(LIBS) + $(CC) -o $(TARGET2) $(OBJS2) $(LDFLAGS) $(LIBS) clean: rm -f $(OBJS) $(TARGET) $(OBJS2) $(TARGET2) From be08648e8298cdb13e9879ee761a73f827268962 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Thu, 19 Aug 2021 20:05:26 +0200 Subject: [PATCH 106/420] fix(http2): improve errors emitted by HTTP2 `Upgraded` stream shutdown (#2622) --- src/proto/h2/mod.rs | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index b8312aff64..2b066968d5 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -377,9 +377,24 @@ where fn poll_shutdown( mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, + cx: &mut Context<'_>, ) -> Poll> { - Poll::Ready(self.send_stream.write(&[], true)) + if self.send_stream.write(&[], true).is_ok() { + return Poll::Ready(Ok(())) + } + + Poll::Ready(Err(h2_to_io_error( + match ready!(self.send_stream.poll_reset(cx)) { + Ok(Reason::NO_ERROR) => { + return Poll::Ready(Ok(())) + } + Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + Ok(reason) => reason.into(), + Err(e) => e, + }, + ))) } } From 95a978344c29351e2e381af0a91772093e01e255 Mon Sep 17 00:00:00 2001 From: Soveu Date: Thu, 19 Aug 2021 20:10:30 +0200 Subject: [PATCH 107/420] refactor(http1): use MaybeUninit for parsing with uninitialized headers (#2545) Closes #2532 --- src/proto/h1/role.rs | 55 ++++++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 5e8bd43a07..318f0c441a 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -1,9 +1,5 @@ -// `mem::uninitialized` replaced with `mem::MaybeUninit`, 
-// can't upgrade yet -#![allow(deprecated)] - use std::fmt::{self, Write}; -use std::mem; +use std::mem::{self, MaybeUninit}; #[cfg(any(test, feature = "server", feature = "ffi"))] use bytes::Bytes; @@ -115,17 +111,23 @@ impl Http1Transaction for Server { // but we *never* read any of it until after httparse has assigned // values into it. By not zeroing out the stack memory, this saves // a good ~5% on pipeline benchmarks. - let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() }; + let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + MaybeUninit::uninit().assume_init() + }; { - let mut headers: [httparse::Header<'_>; MAX_HEADERS] = unsafe { mem::uninitialized() }; + /* SAFETY: it is safe to go from MaybeUninit array to array of MaybeUninit */ + let mut headers: [MaybeUninit>; MAX_HEADERS] = unsafe { + MaybeUninit::uninit().assume_init() + }; trace!( "Request.parse([Header; {}], [u8; {}])", headers.len(), buf.len() ); - let mut req = httparse::Request::new(&mut headers); + let mut req = httparse::Request::new(&mut []); let bytes = buf.as_ref(); - match req.parse(bytes) { + match req.parse_with_uninit_headers(bytes, &mut headers) { Ok(httparse::Status::Complete(parsed_len)) => { trace!("Request.parse Complete({})", parsed_len); len = parsed_len; @@ -194,6 +196,8 @@ impl Http1Transaction for Server { headers.reserve(headers_len); for header in &headers_indices[..headers_len] { + // SAFETY: array is valid up to `headers_len` + let header = unsafe { &*header.as_ptr() }; let name = header_name!(&slice[header.name.0..header.name.1]); let value = header_value!(slice.slice(header.value.0..header.value.1)); @@ -867,18 +871,24 @@ impl Http1Transaction for Client { // Loop to skip information status code headers (100 Continue, etc). loop { // Unsafe: see comment in Server Http1Transaction, above. 
- let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() }; + let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + MaybeUninit::uninit().assume_init() + }; let (len, status, reason, version, headers_len) = { - let mut headers: [httparse::Header<'_>; MAX_HEADERS] = - unsafe { mem::uninitialized() }; + // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit + let mut headers: [MaybeUninit>; MAX_HEADERS] = + unsafe { MaybeUninit::uninit().assume_init() }; trace!( "Response.parse([Header; {}], [u8; {}])", headers.len(), buf.len() ); - let mut res = httparse::Response::new(&mut headers); + let mut res = httparse::Response::new(&mut []); let bytes = buf.as_ref(); - match ctx.h1_parser_config.parse_response(&mut res, bytes) { + match ctx.h1_parser_config + .parse_response_with_uninit_headers(&mut res, bytes, &mut headers) + { Ok(httparse::Status::Complete(len)) => { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; @@ -934,6 +944,8 @@ impl Http1Transaction for Client { headers.reserve(headers_len); for header in &headers_indices[..headers_len] { + // SAFETY: array is valid up to `headers_len` + let header = unsafe { &*header.as_ptr() }; let name = header_name!(&slice[header.name.0..header.name.1]); let value = header_value!(slice.slice(header.value.0..header.value.1)); @@ -1288,7 +1300,7 @@ struct HeaderIndices { fn record_header_indices( bytes: &[u8], headers: &[httparse::Header<'_>], - indices: &mut [HeaderIndices], + indices: &mut [MaybeUninit], ) -> Result<(), crate::error::Parse> { let bytes_ptr = bytes.as_ptr() as usize; @@ -1299,10 +1311,19 @@ fn record_header_indices( } let name_start = header.name.as_ptr() as usize - bytes_ptr; let name_end = name_start + header.name.len(); - indices.name = (name_start, name_end); let value_start = header.value.as_ptr() as usize - bytes_ptr; let value_end = value_start + header.value.len(); - indices.value = (value_start, value_end); + + // FIXME(maybe_uninit_extra) + // FIXME(addr_of) + // Currently we don't have `ptr::addr_of_mut` in stable rust or + // MaybeUninit::write, so this is some way of assigning into a MaybeUninit + // safely + let new_header_indices = HeaderIndices { + name: (name_start, name_end), + value: (value_start, value_end), + }; + *indices = MaybeUninit::new(new_header_indices); } Ok(()) From adaa8b3f0e97f5b5ceac91650d71bebb4476786b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 19 Aug 2021 11:14:32 -0700 Subject: [PATCH 108/420] chore(dependencies): require httparse 1.5.1 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 2cac5eabb0..309a2c25eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ futures-util = { version = "0.3", default-features = false } http = "0.2" http-body = "0.4" httpdate = "1.0" -httparse = "1.4" +httparse = "1.5.1" h2 = { version = "0.3.3", optional = true } itoa = "0.4.1" tracing = { version = "0.1", default-features = false, features = ["std"] } From 39b6d01aa0e520077bb25e16811f5ece00a224d6 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 19 Aug 2021 11:49:12 -0700 Subject: [PATCH 109/420] fix(ffi): on_informational callback had no headers --- capi/examples/upload.c | 2 +- capi/include/hyper.h | 4 ++-- src/ffi/http_types.rs | 6 +++--- src/proto/h1/role.rs | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git 
a/capi/examples/upload.c b/capi/examples/upload.c index 5492944241..10582c8867 100644 --- a/capi/examples/upload.c +++ b/capi/examples/upload.c @@ -148,7 +148,7 @@ static int print_each_header(void *userdata, return HYPER_ITER_CONTINUE; } -static void print_informational(void *userdata, const hyper_response *resp) { +static void print_informational(void *userdata, hyper_response *resp) { uint16_t http_status = hyper_response_status(resp); printf("\nInformational (1xx): %d\n", http_status); diff --git a/capi/include/hyper.h b/capi/include/hyper.h index d969f973fe..64e28bf894 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -207,7 +207,7 @@ typedef int (*hyper_body_foreach_callback)(void*, const struct hyper_buf*); typedef int (*hyper_body_data_callback)(void*, struct hyper_context*, struct hyper_buf**); -typedef void (*hyper_request_on_informational_callback)(void*, const struct hyper_response*); +typedef void (*hyper_request_on_informational_callback)(void*, struct hyper_response*); typedef int (*hyper_headers_foreach_callback)(void*, const uint8_t*, size_t, const uint8_t*, size_t); @@ -469,7 +469,7 @@ enum hyper_code hyper_request_set_body(struct hyper_request *req, struct hyper_b `hyper_response *` which can be inspected as any other response. The body of the response will always be empty. - NOTE: The `const hyper_response *` is just borrowed data, and will not + NOTE: The `hyper_response *` is just borrowed data, and will not be valid after the callback finishes. You must copy any data you wish to persist. */ diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index f60e60bc8a..e7daaeba79 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -34,7 +34,7 @@ pub(crate) struct OnInformational { data: UserDataPointer, } -type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *const hyper_response); +type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *mut hyper_response); // ===== impl hyper_request ===== @@ -153,7 +153,7 @@ ffi_fn! { /// `hyper_response *` which can be inspected as any other response. The /// body of the response will always be empty. /// - /// NOTE: The `const hyper_response *` is just borrowed data, and will not + /// NOTE: The `hyper_response *` is just borrowed data, and will not /// be valid after the callback finishes. You must copy any data you wish /// to persist. 
fn hyper_request_on_informational(req: *mut hyper_request, callback: hyper_request_on_informational_callback, data: *mut c_void) -> hyper_code { @@ -437,7 +437,7 @@ unsafe fn raw_name_value( impl OnInformational { pub(crate) fn call(&mut self, resp: Response) { - let mut resp = hyper_response(resp); + let mut resp = hyper_response::wrap(resp); (self.func)(self.data.0, &mut resp); } } diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 318f0c441a..50433b8af9 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -1,5 +1,5 @@ use std::fmt::{self, Write}; -use std::mem::{self, MaybeUninit}; +use std::mem::MaybeUninit; #[cfg(any(test, feature = "server", feature = "ffi"))] use bytes::Bytes; @@ -360,7 +360,7 @@ impl Http1Transaction for Server { } let orig_headers; - let extensions = mem::take(&mut msg.head.extensions); + let extensions = std::mem::take(&mut msg.head.extensions); let orig_headers = match extensions.get::() { None if msg.title_case_headers => { orig_headers = HeaderCaseMap::default(); From f46b175bf71b202fbb907c4970b5743881b891e1 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 24 Aug 2021 13:04:34 -0700 Subject: [PATCH 110/420] v0.14.12 --- CHANGELOG.md | 15 +++++++++++++++ Cargo.toml | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 75b93a427e..46dfc851f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,18 @@ +### v0.14.12 (2021-08-24) + + +#### Bug Fixes + +* **ffi:** on_informational callback had no headers ([39b6d01a](https://github.com/hyperium/hyper/commit/39b6d01aa0e520077bb25e16811f5ece00a224d6)) +* **http1:** apply header title case for consecutive dashes (#2613) ([684f2fa7](https://github.com/hyperium/hyper/commit/684f2fa76d44fa2b1b063ad0443a1b0d16dfad0e)) +* **http2:** improve errors emitted by HTTP2 `Upgraded` stream shutdown (#2622) ([be08648e](https://github.com/hyperium/hyper/commit/be08648e8298cdb13e9879ee761a73f827268962)) + + +#### Features + +* **client:** expose http09 and http1 options on `client::conn::Builder` (#2611) ([73bff4e9](https://github.com/hyperium/hyper/commit/73bff4e98c372ce04b006370c0b0d2af29ea8718), closes [#2461](https://github.com/hyperium/hyper/issues/2461)) + + ### v0.14.11 (2021-07-21) diff --git a/Cargo.toml b/Cargo.toml index 309a2c25eb..b171ca5a37 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.11" # don't forget to update html_root_url +version = "0.14.12" description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" From 165ada34a3525876890c644a61efd4b17cb4adc0 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 26 Aug 2021 16:38:00 -0700 Subject: [PATCH 111/420] docs(ffi): note that hyper_buf_copy can return NULL on error --- capi/include/hyper.h | 2 ++ src/ffi/body.rs | 2 ++ 2 files changed, 4 insertions(+) diff --git a/capi/include/hyper.h b/capi/include/hyper.h index 64e28bf894..2245d6a45d 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -299,6 +299,8 @@ void hyper_body_set_data_func(struct hyper_body *body, hyper_body_data_callback This makes an owned copy of the bytes, so the `buf` argument can be freed or changed afterwards. + + This returns `NULL` if allocating a new buffer fails. */ struct hyper_buf *hyper_buf_copy(const uint8_t *buf, size_t len); diff --git a/src/ffi/body.rs b/src/ffi/body.rs index 932200b54d..39ba5beffb 100644 --- a/src/ffi/body.rs +++ b/src/ffi/body.rs @@ -185,6 +185,8 @@ ffi_fn! 
{ /// /// This makes an owned copy of the bytes, so the `buf` argument can be /// freed or changed afterwards. + /// + /// This returns `NULL` if allocating a new buffer fails. fn hyper_buf_copy(buf: *const u8, len: size_t) -> *mut hyper_buf { let slice = unsafe { std::slice::from_raw_parts(buf, len) From a81c44f2c8620bad34b89b2a403be5fd70157fef Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 29 Aug 2021 14:31:20 +0200 Subject: [PATCH 112/420] refactor(lib): Import tracing macros per-module Instead of one #[macro_use] at the crate root. --- src/body/length.rs | 2 ++ src/client/client.rs | 1 + src/client/conn.rs | 1 + src/client/connect/dns.rs | 1 + src/client/connect/http.rs | 1 + src/client/dispatch.rs | 1 + src/client/pool.rs | 1 + src/client/service.rs | 2 ++ src/lib.rs | 7 ------- src/proto/h1/conn.rs | 6 +++--- src/proto/h1/decode.rs | 1 + src/proto/h1/dispatch.rs | 1 + src/proto/h1/encode.rs | 1 + src/proto/h1/io.rs | 1 + src/proto/h1/role.rs | 14 ++++++++------ src/proto/h2/client.rs | 1 + src/proto/h2/mod.rs | 1 + src/proto/h2/ping.rs | 1 + src/proto/h2/server.rs | 1 + src/server/conn.rs | 2 ++ src/server/shutdown.rs | 1 + src/server/tcp.rs | 1 + src/upgrade.rs | 2 ++ 23 files changed, 35 insertions(+), 16 deletions(-) diff --git a/src/body/length.rs b/src/body/length.rs index 633a911fb2..6e6daa6b09 100644 --- a/src/body/length.rs +++ b/src/body/length.rs @@ -50,6 +50,8 @@ impl DecodedLength { /// Checks the `u64` is within the maximum allowed for content-length. #[cfg(any(feature = "http1", feature = "http2"))] pub(crate) fn checked_new(len: u64) -> Result { + use tracing::warn; + if len <= MAX_LEN { Ok(DecodedLength(len)) } else { diff --git a/src/client/client.rs b/src/client/client.rs index e1d5914aba..58cd3b1c81 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -8,6 +8,7 @@ use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; use http::header::{HeaderValue, HOST}; use http::uri::{Port, Scheme}; use http::{Method, Request, Response, Uri, Version}; +use tracing::{debug, trace, warn}; use super::conn; use super::connect::{self, sealed::Connect, Alpn, Connected, Connection}; diff --git a/src/client/conn.rs b/src/client/conn.rs index c40452038e..9691394500 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -60,6 +60,7 @@ use httparse::ParserConfig; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use tower_service::Service; +use tracing::{debug, trace}; use super::dispatch; use crate::body::HttpBody; diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs index 08cbb1e883..0036fa2d8c 100644 --- a/src/client/connect/dns.rs +++ b/src/client/connect/dns.rs @@ -31,6 +31,7 @@ use std::{fmt, io, vec}; use tokio::task::JoinHandle; use tower_service::Service; +use tracing::debug; pub(super) use self::sealed::Resolve; diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs index 0f1a487adb..50bb3309a0 100644 --- a/src/client/connect/http.rs +++ b/src/client/connect/http.rs @@ -14,6 +14,7 @@ use http::uri::{Scheme, Uri}; use pin_project_lite::pin_project; use tokio::net::{TcpSocket, TcpStream}; use tokio::time::Sleep; +use tracing::{debug, trace, warn}; use super::dns::{self, resolve, GaiResolver, Resolve}; use super::{Connected, Connection}; diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs index 484cb04f4b..1d2b87eb00 100644 --- a/src/client/dispatch.rs +++ b/src/client/dispatch.rs @@ -235,6 +235,7 @@ impl Callback { mut when: impl Future)>> + Unpin, ) { use 
futures_util::future; + use tracing::trace; let mut cb = Some(self); diff --git a/src/client/pool.rs b/src/client/pool.rs index 9beca9f472..44d4df57e8 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -10,6 +10,7 @@ use std::time::{Duration, Instant}; use futures_channel::oneshot; #[cfg(feature = "runtime")] use tokio::time::{Duration, Instant, Interval}; +use tracing::{debug, trace}; use super::client::Ver; use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin}; diff --git a/src/client/service.rs b/src/client/service.rs index 4013c5e54e..406f61edc9 100644 --- a/src/client/service.rs +++ b/src/client/service.rs @@ -6,6 +6,8 @@ use std::error::Error as StdError; use std::future::Future; use std::marker::PhantomData; +use tracing::debug; + use super::conn::{Builder, SendRequest}; use crate::{ body::HttpBody, diff --git a/src/lib.rs b/src/lib.rs index eb7c1730d0..3c07272379 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -58,13 +58,6 @@ #[doc(hidden)] pub use http; -#[cfg(any( - feature = "http1", - feature = "http2", - all(feature = "client", feature = "tcp") -))] -#[macro_use] -extern crate tracing; #[cfg(all(test, feature = "nightly"))] extern crate test; diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index f3eb01addb..c84689e0b1 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -7,6 +7,7 @@ use http::header::{HeaderValue, CONNECTION}; use http::{HeaderMap, Method, Version}; use httparse::ParserConfig; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, error, trace}; use super::io::Buffered; use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants}; @@ -538,9 +539,8 @@ where #[cfg(feature = "ffi")] { - self.state.on_informational = head - .extensions - .remove::(); + self.state.on_informational = + head.extensions.remove::(); } Some(encoder) diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs index 1e6027d906..1e3a38effc 100644 --- a/src/proto/h1/decode.rs +++ b/src/proto/h1/decode.rs @@ -4,6 +4,7 @@ use std::io; use std::usize; use bytes::Bytes; +use tracing::{debug, trace}; use crate::common::{task, Poll}; diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index 51fabc63ad..d2c1428a75 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -3,6 +3,7 @@ use std::error::Error as StdError; use bytes::{Buf, Bytes}; use http::Request; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace}; use super::{Http1Transaction, Wants}; use crate::body::{Body, DecodedLength, HttpBody}; diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs index 6a3703994a..703f4f4fb9 100644 --- a/src/proto/h1/encode.rs +++ b/src/proto/h1/encode.rs @@ -3,6 +3,7 @@ use std::io::IoSlice; use bytes::buf::{Chain, Take}; use bytes::Buf; +use tracing::trace; use super::io::WriteBuf; diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 2ff3d5a48a..db4eece695 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -6,6 +6,7 @@ use std::mem::MaybeUninit; use bytes::{Buf, BufMut, Bytes, BytesMut}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tracing::{debug, trace}; use super::{Http1Transaction, ParseContext, ParsedMessage}; use crate::common::buf::BufList; diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 50433b8af9..81cb86e846 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -8,6 +8,7 @@ use bytes::BytesMut; use http::header::ValueIter; use http::header::{self, Entry, HeaderName, HeaderValue}; use http::{HeaderMap, Method, StatusCode, 
Version}; +use tracing::{debug, error, trace, trace_span, warn}; use crate::body::DecodedLength; #[cfg(feature = "server")] @@ -117,9 +118,8 @@ impl Http1Transaction for Server { }; { /* SAFETY: it is safe to go from MaybeUninit array to array of MaybeUninit */ - let mut headers: [MaybeUninit>; MAX_HEADERS] = unsafe { - MaybeUninit::uninit().assume_init() - }; + let mut headers: [MaybeUninit>; MAX_HEADERS] = + unsafe { MaybeUninit::uninit().assume_init() }; trace!( "Request.parse([Header; {}], [u8; {}])", headers.len(), @@ -886,9 +886,11 @@ impl Http1Transaction for Client { ); let mut res = httparse::Response::new(&mut []); let bytes = buf.as_ref(); - match ctx.h1_parser_config - .parse_response_with_uninit_headers(&mut res, bytes, &mut headers) - { + match ctx.h1_parser_config.parse_response_with_uninit_headers( + &mut res, + bytes, + &mut headers, + ) { Ok(httparse::Status::Complete(len)) => { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs index 3692a8f253..ae20c8515b 100644 --- a/src/proto/h2/client.rs +++ b/src/proto/h2/client.rs @@ -9,6 +9,7 @@ use futures_util::stream::StreamExt as _; use h2::client::{Builder, SendRequest}; use http::{Method, StatusCode}; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace, warn}; use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; use crate::body::HttpBody; diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index 2b066968d5..7ad4b48a31 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -8,6 +8,7 @@ use std::io::{self, Cursor, IoSlice}; use std::mem; use std::task::Context; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tracing::{debug, trace, warn}; use crate::body::HttpBody; use crate::common::{task, Future, Pin, Poll}; diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs index 3ff45cae1e..1e8386497c 100644 --- a/src/proto/h2/ping.rs +++ b/src/proto/h2/ping.rs @@ -34,6 +34,7 @@ use std::time::Instant; use h2::{Ping, PingPong}; #[cfg(feature = "runtime")] use tokio::time::{Instant, Sleep}; +use tracing::{debug, trace}; type WindowSize = u32; diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index 1222663dda..ad06174646 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -9,6 +9,7 @@ use h2::{Reason, RecvStream}; use http::{Method, Request}; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::{debug, trace, warn}; use super::{ping, PipeToSendStream, SendBuf}; use crate::body::HttpBody; diff --git a/src/server/conn.rs b/src/server/conn.rs index 085f890139..07bf33421c 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -55,6 +55,7 @@ use std::time::Duration; use bytes::Bytes; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::trace; use super::accept::Accept; use crate::body::{Body, HttpBody}; @@ -1037,6 +1038,7 @@ where pub(crate) mod spawn_all { use std::error::Error as StdError; use tokio::io::{AsyncRead, AsyncWrite}; + use tracing::debug; use super::{Connecting, UpgradeableConnection}; use crate::body::{Body, HttpBody}; diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs index 122853ac17..8a2ecaf306 100644 --- a/src/server/shutdown.rs +++ b/src/server/shutdown.rs @@ -2,6 +2,7 @@ use std::error::Error as StdError; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::debug; use super::accept::Accept; use super::conn::{SpawnAll, 
UpgradeableConnection, Watcher}; diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 792e0034f3..013bdaea1d 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -5,6 +5,7 @@ use std::time::Duration; use tokio::net::TcpListener; use tokio::time::Sleep; +use tracing::{debug, error, trace}; use crate::common::{task, Future, Pin, Poll}; diff --git a/src/upgrade.rs b/src/upgrade.rs index efab10a6fc..be4e48220f 100644 --- a/src/upgrade.rs +++ b/src/upgrade.rs @@ -14,6 +14,8 @@ use std::marker::Unpin; use bytes::Bytes; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::sync::oneshot; +#[cfg(any(feature = "http1", feature = "http2"))] +use tracing::trace; use crate::common::io::Rewind; use crate::common::{task, Future, Pin, Poll}; From 07f920871454479a4c21301c51630e073aafdefb Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 29 Aug 2021 14:48:37 +0200 Subject: [PATCH 113/420] refactor(lib): Inline cfg_http1, cfg_http2 macros --- src/cfg.rs | 18 ------------------ src/proto/mod.rs | 9 +++++---- 2 files changed, 5 insertions(+), 22 deletions(-) diff --git a/src/cfg.rs b/src/cfg.rs index 1533951b5b..71a5351d21 100644 --- a/src/cfg.rs +++ b/src/cfg.rs @@ -24,24 +24,6 @@ macro_rules! cfg_proto { } cfg_proto! { - macro_rules! cfg_http1 { - ($($item:item)*) => { - cfg_feature! { - #![feature = "http1"] - $($item)* - } - } - } - - macro_rules! cfg_http2 { - ($($item:item)*) => { - cfg_feature! { - #![feature = "http2"] - $($item)* - } - } - } - macro_rules! cfg_client { ($($item:item)*) => { cfg_feature! { diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 513b70f86f..f938bf532b 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -1,6 +1,8 @@ //! Pieces pertaining to the HTTP message protocol. -cfg_http1! { +cfg_feature! { + #![feature = "http1"] + pub(crate) mod h1; pub(crate) use self::h1::Conn; @@ -11,9 +13,8 @@ cfg_http1! { pub(crate) use self::h1::ServerTransaction; } -cfg_http2! { - pub(crate) mod h2; -} +#[cfg(feature = "http2")] +pub(crate) mod h2; /// An Incoming Message head. Includes request/status line, and headers. #[derive(Debug, Default)] From db57316e14a663c2da9af66cd831b3df767efeb3 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 29 Aug 2021 20:36:06 +0200 Subject: [PATCH 114/420] refactor(lib): Clean up cfg usage --- src/common/mod.rs | 7 +------ src/lib.rs | 4 ++-- src/server/mod.rs | 8 ++------ src/server/server.rs | 6 ++---- 4 files changed, 7 insertions(+), 18 deletions(-) diff --git a/src/common/mod.rs b/src/common/mod.rs index 050c61d15c..c017154adf 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -25,12 +25,7 @@ pub(crate) mod watch; #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] pub(crate) use self::lazy::{lazy, Started as Lazy}; -#[cfg(any( - feature = "client", - feature = "http1", - feature = "http2", - feature = "runtime" -))] +#[cfg(any(feature = "http1", feature = "http2", feature = "runtime"))] pub(crate) use self::never::Never; pub(crate) use self::task::Poll; diff --git a/src/lib.rs b/src/lib.rs index 3c07272379..a85a27699f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -93,7 +93,7 @@ cfg_proto! { } cfg_feature! { - #![all(feature = "client")] + #![feature = "client"] pub mod client; #[cfg(any(feature = "http1", feature = "http2"))] @@ -102,7 +102,7 @@ cfg_feature! { } cfg_feature! 
{ - #![all(feature = "server")] + #![feature = "server"] pub mod server; #[cfg(any(feature = "http1", feature = "http2"))] diff --git a/src/server/mod.rs b/src/server/mod.rs index 690c8127a7..6f691f610c 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -157,10 +157,6 @@ cfg_feature! { pub mod conn; mod server; mod shutdown; - - cfg_feature! { - #![feature = "tcp"] - - mod tcp; - } + #[cfg(feature = "tcp")] + mod tcp; } diff --git a/src/server/server.rs b/src/server/server.rs index bdd517808b..810ac7127d 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -353,8 +353,7 @@ impl Builder { /// # Cargo Feature /// /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] + #[cfg(all(feature = "runtime", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_interval(mut self, interval: impl Into>) -> Self { self.protocol.http2_keep_alive_interval(interval); @@ -371,8 +370,7 @@ impl Builder { /// # Cargo Feature /// /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] + #[cfg(all(feature = "runtime", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_timeout(mut self, timeout: Duration) -> Self { self.protocol.http2_keep_alive_timeout(timeout); From d49c25b22480909e730a7e06976d0340cd5c06c1 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 29 Aug 2021 20:52:05 +0200 Subject: [PATCH 115/420] refactor(server): Merge imports --- src/server/conn.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/server/conn.rs b/src/server/conn.rs index 07bf33421c..5783bb24d8 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -73,8 +73,7 @@ use crate::service::{HttpService, MakeServiceRef}; use crate::upgrade::Upgraded; use self::spawn_all::NewSvcTask; -pub(super) use self::spawn_all::NoopWatcher; -pub(super) use self::spawn_all::Watcher; +pub(super) use self::spawn_all::{NoopWatcher, Watcher}; pub(super) use self::upgrades::UpgradeableConnection; #[cfg(feature = "tcp")] From cf6f62c71eda3b3a8732d86387e4ed8711cf9a86 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 29 Aug 2021 21:34:28 +0200 Subject: [PATCH 116/420] feat(lib): Export rt module independently of Cargo features --- src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index a85a27699f..eaf6a14318 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -79,7 +79,6 @@ mod error; mod ext; #[cfg(test)] mod mock; -#[cfg(any(feature = "http1", feature = "http2",))] pub mod rt; pub mod service; pub mod upgrade; From 0a4b56acb82ef41a3336f482b240c67c784c434f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 29 Aug 2021 22:24:44 +0200 Subject: [PATCH 117/420] feat(lib): Export more things with Cargo features [server, !http1, !http2] * server::Server * server::conn::{AddrIncoming, AddrStream} This allows higher-level libraries to use or re-export more parts of the API without deciding for the end user which HTTP versions the hyper server will support. 
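Editor's illustration only (not part of this patch, and the feature names below are assumptions based on the cfg attributes touched in this series): a downstream wrapper crate could now depend on hyper with something like

    hyper = { version = "0.14", features = ["server", "tcp"] }

in its Cargo.toml — no `http1` or `http2` feature — and still name the newly exported types, deferring the protocol choice to its own users:

    // Sketch of assumed downstream code; not taken from this repository.
    use hyper::server::conn::AddrIncoming;

    // Re-export so the end user can pick HTTP versions through their own
    // hyper feature selection rather than ours.
    pub use hyper::Server;

    pub type Incoming = AddrIncoming;

The point is only that `Server` and the `server::conn` address types resolve without either HTTP feature enabled.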
--- src/common/exec.rs | 14 ++++---- src/common/mod.rs | 2 +- src/error.rs | 12 ++----- src/lib.rs | 1 - src/server/conn.rs | 84 ++++++++++++++++++++++++++++++++------------ src/server/mod.rs | 12 ++++--- src/server/server.rs | 68 ++++++++++++++++++++++++----------- 7 files changed, 127 insertions(+), 66 deletions(-) diff --git a/src/common/exec.rs b/src/common/exec.rs index f7560efbfc..b6da9a276b 100644 --- a/src/common/exec.rs +++ b/src/common/exec.rs @@ -3,14 +3,16 @@ use std::future::Future; use std::pin::Pin; use std::sync::Arc; +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] +use crate::body::Body; #[cfg(feature = "server")] -use crate::body::{Body, HttpBody}; +use crate::body::HttpBody; #[cfg(all(feature = "http2", feature = "server"))] use crate::proto::h2::server::H2Stream; use crate::rt::Executor; -#[cfg(feature = "server")] +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] use crate::server::conn::spawn_all::{NewSvcTask, Watcher}; -#[cfg(feature = "server")] +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] use crate::service::HttpService; #[cfg(feature = "server")] @@ -18,7 +20,7 @@ pub trait ConnStreamExec: Clone { fn execute_h2stream(&mut self, fut: H2Stream); } -#[cfg(feature = "server")] +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub trait NewSvcExec, E, W: Watcher>: Clone { fn execute_new_svc(&mut self, fut: NewSvcTask); } @@ -76,7 +78,7 @@ where } } -#[cfg(feature = "server")] +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] impl NewSvcExec for Exec where NewSvcTask: Future + Send + 'static, @@ -102,7 +104,7 @@ where } } -#[cfg(feature = "server")] +#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] impl NewSvcExec for E where E: Executor> + Clone, diff --git a/src/common/mod.rs b/src/common/mod.rs index c017154adf..b9f5907a7e 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -12,7 +12,7 @@ pub(crate) mod buf; pub(crate) mod date; #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub(crate) mod drain; -#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg(any(feature = "http1", feature = "http2", feature = "server"))] pub(crate) mod exec; pub(crate) mod io; #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] diff --git a/src/error.rs b/src/error.rs index bd798667ee..5d350278f3 100644 --- a/src/error.rs +++ b/src/error.rs @@ -38,11 +38,7 @@ pub(super) enum Kind { #[allow(unused)] Connect, /// Error creating a TcpListener. - #[cfg(all( - any(feature = "http1", feature = "http2"), - feature = "tcp", - feature = "server" - ))] + #[cfg(all(feature = "tcp", feature = "server"))] Listen, /// Error accepting on an Incoming stream. 
#[cfg(any(feature = "http1", feature = "http2"))] @@ -265,8 +261,7 @@ impl Error { Error::new(Kind::Io).with(cause) } - #[cfg(all(any(feature = "http1", feature = "http2"), feature = "tcp"))] - #[cfg(feature = "server")] + #[cfg(all(feature = "server", feature = "tcp"))] pub(super) fn new_listen>(cause: E) -> Error { Error::new(Kind::Listen).with(cause) } @@ -410,8 +405,7 @@ impl Error { Kind::ChannelClosed => "channel closed", Kind::Connect => "error trying to connect", Kind::Canceled => "operation was canceled", - #[cfg(all(any(feature = "http1", feature = "http2"), feature = "tcp"))] - #[cfg(feature = "server")] + #[cfg(all(feature = "server", feature = "tcp"))] Kind::Listen => "error creating server listener", #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "server")] diff --git a/src/lib.rs b/src/lib.rs index eaf6a14318..41a0b37518 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -104,7 +104,6 @@ cfg_feature! { #![feature = "server"] pub mod server; - #[cfg(any(feature = "http1", feature = "http2"))] #[doc(no_inline)] pub use crate::server::Server; } diff --git a/src/server/conn.rs b/src/server/conn.rs index 5783bb24d8..5872033277 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -43,38 +43,47 @@ //! # } //! ``` -use std::error::Error as StdError; -use std::fmt; -#[cfg(not(all(feature = "http1", feature = "http2")))] +#[cfg(all( + any(feature = "http1", feature = "http2"), + not(all(feature = "http1", feature = "http2")) +))] use std::marker::PhantomData; #[cfg(feature = "tcp")] use std::net::SocketAddr; #[cfg(all(feature = "runtime", feature = "http2"))] use std::time::Duration; -use bytes::Bytes; -use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::trace; - -use super::accept::Accept; -use crate::body::{Body, HttpBody}; -use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; #[cfg(feature = "http2")] use crate::common::io::Rewind; -#[cfg(not(all(feature = "http1", feature = "http2")))] -use crate::common::Never; -use crate::common::{task, Future, Pin, Poll, Unpin}; #[cfg(all(feature = "http1", feature = "http2"))] use crate::error::{Kind, Parse}; -use crate::proto; -use crate::service::{HttpService, MakeServiceRef}; #[cfg(feature = "http1")] use crate::upgrade::Upgraded; -use self::spawn_all::NewSvcTask; -pub(super) use self::spawn_all::{NoopWatcher, Watcher}; -pub(super) use self::upgrades::UpgradeableConnection; +cfg_feature! { + #![any(feature = "http1", feature = "http2")] + + use std::error::Error as StdError; + use std::fmt; + + use bytes::Bytes; + use pin_project_lite::pin_project; + use tokio::io::{AsyncRead, AsyncWrite}; + use tracing::trace; + + use super::accept::Accept; + use crate::body::{Body, HttpBody}; + use crate::common::{task, Future, Pin, Poll, Unpin}; + #[cfg(not(all(feature = "http1", feature = "http2")))] + use crate::common::Never; + use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; + use crate::proto; + use crate::service::{HttpService, MakeServiceRef}; + use self::spawn_all::NewSvcTask; + + pub(super) use self::spawn_all::{NoopWatcher, Watcher}; + pub(super) use self::upgrades::UpgradeableConnection; +} #[cfg(feature = "tcp")] pub use super::tcp::{AddrIncoming, AddrStream}; @@ -86,6 +95,8 @@ pub use super::tcp::{AddrIncoming, AddrStream}; /// If you don't have need to manage connections yourself, consider using the /// higher-level [Server](super) API. 
#[derive(Clone, Debug)] +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Http { exec: E, h1_half_close: bool, @@ -100,6 +111,7 @@ pub struct Http { } /// The internal mode of HTTP protocol which indicates the behavior when a parse error occurs. +#[cfg(any(feature = "http1", feature = "http2"))] #[derive(Clone, Debug, PartialEq)] enum ConnectionMode { /// Always use HTTP/1 and do not upgrade when a parse error occurs. @@ -113,6 +125,7 @@ enum ConnectionMode { Fallback, } +#[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A stream mapping incoming IOs to new services. /// @@ -127,6 +140,7 @@ pin_project! { } } +#[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A future building a new `Service` to a `Connection`. /// @@ -134,6 +148,7 @@ pin_project! { /// a `Connection`. #[must_use = "futures do nothing unless polled"] #[derive(Debug)] + #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Connecting { #[pin] future: F, @@ -142,6 +157,7 @@ pin_project! { } } +#[cfg(any(feature = "http1", feature = "http2"))] pin_project! { #[must_use = "futures do nothing unless polled"] #[derive(Debug)] @@ -154,11 +170,13 @@ pin_project! { } } +#[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A future binding a connection with a Service. /// /// Polling this future will drive HTTP forward. #[must_use = "futures do nothing unless polled"] + #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Connection where S: HttpService, @@ -172,18 +190,19 @@ pin_project! { type Http1Dispatcher = proto::h1::Dispatcher, B, T, proto::ServerTransaction>; -#[cfg(not(feature = "http1"))] +#[cfg(all(not(feature = "http1"), feature = "http2"))] type Http1Dispatcher = (Never, PhantomData<(T, Box>, Box>)>); #[cfg(feature = "http2")] type Http2Server = proto::h2::Server, S, B, E>; -#[cfg(not(feature = "http2"))] +#[cfg(all(not(feature = "http2"), feature = "http1"))] type Http2Server = ( Never, PhantomData<(T, Box>, Box>, Box>)>, ); +#[cfg(any(feature = "http1", feature = "http2"))] pin_project! { #[project = ProtoServerProj] pub(super) enum ProtoServer @@ -209,7 +228,10 @@ enum Fallback { Http1Only, } -#[cfg(not(all(feature = "http1", feature = "http2")))] +#[cfg(all( + any(feature = "http1", feature = "http2"), + not(all(feature = "http1", feature = "http2")) +))] type Fallback = PhantomData; #[cfg(all(feature = "http1", feature = "http2"))] @@ -230,6 +252,8 @@ impl Unpin for Fallback {} /// This allows taking apart a `Connection` at a later time, in order to /// reclaim the IO object, and additional related pieces. #[derive(Debug)] +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Parts { /// The original IO object used in the handshake. pub io: T, @@ -249,6 +273,7 @@ pub struct Parts { // ===== impl Http ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Creates a new instance of the HTTP protocol, ready to spawn a server or /// start accepting connections. @@ -268,6 +293,7 @@ impl Http { } } +#[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Sets whether HTTP1 is required. 
/// @@ -633,6 +659,7 @@ impl Http { // ===== impl Connection ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Connection where S: HttpService, @@ -802,6 +829,7 @@ where } } +#[cfg(any(feature = "http1", feature = "http2"))] impl Future for Connection where S: HttpService, @@ -848,6 +876,7 @@ where } } +#[cfg(any(feature = "http1", feature = "http2"))] impl fmt::Debug for Connection where S: HttpService, @@ -859,6 +888,7 @@ where // ===== impl ConnectionMode ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Default for ConnectionMode { #[cfg(all(feature = "http1", feature = "http2"))] fn default() -> ConnectionMode { @@ -878,6 +908,7 @@ impl Default for ConnectionMode { // ===== impl Serve ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Serve { /// Get a reference to the incoming stream. #[inline] @@ -899,6 +930,7 @@ impl Serve { } } +#[cfg(any(feature = "http1", feature = "http2"))] impl Serve where I: Accept, @@ -937,6 +969,7 @@ where // ===== impl Connecting ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Future for Connecting where I: AsyncRead + AsyncWrite + Unpin, @@ -958,19 +991,21 @@ where // ===== impl SpawnAll ===== -#[cfg(feature = "tcp")] +#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] impl SpawnAll { pub(super) fn local_addr(&self) -> SocketAddr { self.serve.incoming.local_addr() } } +#[cfg(any(feature = "http1", feature = "http2"))] impl SpawnAll { pub(super) fn incoming_ref(&self) -> &I { self.serve.incoming_ref() } } +#[cfg(any(feature = "http1", feature = "http2"))] impl SpawnAll where I: Accept, @@ -1008,6 +1043,7 @@ where // ===== impl ProtoServer ===== +#[cfg(any(feature = "http1", feature = "http2"))] impl Future for ProtoServer where T: AsyncRead + AsyncWrite + Unpin, @@ -1034,6 +1070,7 @@ where } } +#[cfg(any(feature = "http1", feature = "http2"))] pub(crate) mod spawn_all { use std::error::Error as StdError; use tokio::io::{AsyncRead, AsyncWrite}; @@ -1177,6 +1214,7 @@ pub(crate) mod spawn_all { } } +#[cfg(any(feature = "http1", feature = "http2"))] mod upgrades { use super::*; diff --git a/src/server/mod.rs b/src/server/mod.rs index 6f691f610c..a97944f518 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -148,15 +148,17 @@ //! [`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html pub mod accept; +pub mod conn; +mod server; +#[cfg(feature = "tcp")] +mod tcp; + +pub use self::server::Server; cfg_feature! { #![any(feature = "http1", feature = "http2")] - pub use self::server::{Builder, Server}; + pub use self::server::Builder; - pub mod conn; - mod server; mod shutdown; - #[cfg(feature = "tcp")] - mod tcp; } diff --git a/src/server/server.rs b/src/server/server.rs index 810ac7127d..377f7cb617 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -1,26 +1,33 @@ -use std::error::Error as StdError; use std::fmt; #[cfg(feature = "tcp")] use std::net::{SocketAddr, TcpListener as StdTcpListener}; - #[cfg(feature = "tcp")] use std::time::Duration; -use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; - -use super::accept::Accept; -use crate::body::{Body, HttpBody}; -use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; -use crate::common::{task, Future, Pin, Poll, Unpin}; -use crate::service::{HttpService, MakeServiceRef}; -// Renamed `Http` as `Http_` for now so that people upgrading don't see an -// error that `hyper::server::Http` is private... 
-use super::conn::{Http as Http_, NoopWatcher, SpawnAll}; -use super::shutdown::{Graceful, GracefulWatcher}; -#[cfg(feature = "tcp")] +#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] use super::tcp::AddrIncoming; +use crate::common::exec::Exec; + +cfg_feature! { + #![any(feature = "http1", feature = "http2")] + + use std::error::Error as StdError; + + use pin_project_lite::pin_project; + use tokio::io::{AsyncRead, AsyncWrite}; + + use super::accept::Accept; + use crate::body::{Body, HttpBody}; + use crate::common::{task, Future, Pin, Poll, Unpin}; + use crate::common::exec::{ConnStreamExec, NewSvcExec}; + // Renamed `Http` as `Http_` for now so that people upgrading don't see an + // error that `hyper::server::Http` is private... + use super::conn::{Http as Http_, NoopWatcher, SpawnAll}; + use super::shutdown::{Graceful, GracefulWatcher}; + use crate::service::{HttpService, MakeServiceRef}; +} +#[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. /// @@ -34,8 +41,18 @@ pin_project! { } } +/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. +/// +/// Needs at least one of the `http1` and `http2` features to be activated to actually be useful. +#[cfg(not(any(feature = "http1", feature = "http2")))] +pub struct Server { + _marker: std::marker::PhantomData<(I, S, E)>, +} + /// A builder for a [`Server`](Server). #[derive(Debug)] +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Builder { incoming: I, protocol: Http_, @@ -43,6 +60,8 @@ pub struct Builder { // ===== impl Server ===== +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] impl Server { /// Starts a [`Builder`](Builder) with the provided incoming stream. pub fn builder(incoming: I) -> Builder { @@ -54,7 +73,7 @@ impl Server { } cfg_feature! { - #![all(feature = "tcp")] + #![all(feature = "tcp", any(feature = "http1", feature = "http2"))] impl Server { /// Binds to the provided address, and returns a [`Builder`](Builder). @@ -83,7 +102,7 @@ cfg_feature! { } cfg_feature! { - #![all(feature = "tcp")] + #![all(feature = "tcp", any(feature = "http1", feature = "http2"))] impl Server { /// Returns the local address that this server is bound to. @@ -93,6 +112,8 @@ cfg_feature! { } } +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] impl Server where I: Accept, @@ -149,6 +170,8 @@ where } } +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] impl Future for Server where I: Accept, @@ -170,14 +193,17 @@ where impl fmt::Debug for Server { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Server") - .field("listener", &self.spawn_all.incoming_ref()) - .finish() + let mut st = f.debug_struct("Server"); + #[cfg(any(feature = "http1", feature = "http2"))] + st.field("listener", &self.spawn_all.incoming_ref()); + st.finish() } } // ===== impl Builder ===== +#[cfg(any(feature = "http1", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] impl Builder { /// Start a new builder, wrapping an incoming stream and low-level options. 
/// @@ -435,7 +461,7 @@ impl Builder { } } -#[cfg(feature = "tcp")] +#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] impl Builder { /// Set whether TCP keepalive messages are enabled on accepted connections. /// From e3ab409808a6aa06ebacaaa936cb926785913d24 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 14 Sep 2021 16:30:20 -0700 Subject: [PATCH 118/420] refactor(lints): fix unused warnings in tests/docs --- examples/client_json.rs | 1 + src/client/pool.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/examples/client_json.rs b/examples/client_json.rs index 5bb585b935..fa99e2c579 100644 --- a/examples/client_json.rs +++ b/examples/client_json.rs @@ -41,5 +41,6 @@ async fn fetch_json(url: hyper::Uri) -> Result> { #[derive(Deserialize, Debug)] struct User { id: i32, + #[allow(unused)] name: String, } diff --git a/src/client/pool.rs b/src/client/pool.rs index 44d4df57e8..4080ad5f66 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -1005,6 +1005,7 @@ mod tests { #[derive(Debug)] struct CanClose { + #[allow(unused)] val: i32, closed: bool, } From c88011da4ed5b5ca9107c4a2339a7ab054c5f27f Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 14 Sep 2021 16:18:28 -0700 Subject: [PATCH 119/420] fix(client): don't reuse a connection while still flushing A client connection that read a full response while the request body was still flushing would see incorrect behavior, since the pool would let it be checked out again for a new request. In debug builds, it would then panic, but in release builds it would intermix the 2nd request bytes with the body of the previous request. In practice, this only ever happens if a server replies with a full response before reading the full request, while also choosing to not close that connection. Most servers either wait for the full request, or close the connection after the new response is written, so as to stop reading. --- src/proto/h1/conn.rs | 7 ++++++- src/proto/h1/dispatch.rs | 29 ++++++++++++++++++++++++++++- src/proto/h1/io.rs | 16 +++++++++++++++- 3 files changed, 49 insertions(+), 3 deletions(-) diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index c84689e0b1..887dee48e5 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -71,6 +71,11 @@ where self.io.set_flush_pipeline(enabled); } + #[cfg(test)] + pub(crate) fn set_write_strategy_queue(&mut self) { + self.io.set_write_strategy_queue(); + } + pub(crate) fn set_max_buf_size(&mut self, max: usize) { self.io.set_max_buf_size(max); } @@ -461,7 +466,7 @@ where } } match self.state.writing { - Writing::Init => true, + Writing::Init => self.io.can_headers_buf(), _ => false, } } diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index d2c1428a75..677131bfdd 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -665,7 +665,6 @@ mod tests { // Block at 0 for now, but we will release this response before // the request is ready to write later... 
- //let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\n\r\n".to_vec(), 0); let (mut tx, rx) = crate::client::dispatch::channel(); let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); let mut dispatcher = Dispatcher::new(Client::new(rx), conn); @@ -692,6 +691,34 @@ mod tests { }); } + #[tokio::test] + async fn client_flushing_is_not_ready_for_next_request() { + let _ = pretty_env_logger::try_init(); + + let (io, _handle) = tokio_test::io::Builder::new() + .write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n") + .read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n") + .wait(std::time::Duration::from_secs(2)) + .build_with_handle(); + + let (mut tx, rx) = crate::client::dispatch::channel(); + let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + conn.set_write_strategy_queue(); + + let dispatcher = Dispatcher::new(Client::new(rx), conn); + let _dispatcher = tokio::spawn(async move { dispatcher.await }); + + let req = crate::Request::builder() + .method("POST") + .body(crate::Body::from("reee")) + .unwrap(); + + let res = tx.try_send(req).unwrap().await.expect("response"); + drop(res); + + assert!(!tx.is_ready()); + } + #[tokio::test] async fn body_empty_chunks_ignored() { let _ = pretty_env_logger::try_init(); diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index db4eece695..a7523001bc 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -98,13 +98,18 @@ where } #[cfg(feature = "server")] - pub(crate) fn set_write_strategy_flatten(&mut self) { + fn set_write_strategy_flatten(&mut self) { // this should always be called only at construction time, // so this assert is here to catch myself debug_assert!(self.write_buf.queue.bufs_cnt() == 0); self.write_buf.set_strategy(WriteStrategy::Flatten); } + #[cfg(test)] + pub(crate) fn set_write_strategy_queue(&mut self) { + self.write_buf.set_strategy(WriteStrategy::Queue); + } + pub(crate) fn read_buf(&self) -> &[u8] { self.read_buf.as_ref() } @@ -121,6 +126,15 @@ where self.read_buf.capacity() - self.read_buf.len() } + /// Return whether we can append to the headers buffer. + /// + /// Reasons we can't: + /// - The write buf is in queue mode, and some of the past body is still + /// needing to be flushed. + pub(crate) fn can_headers_buf(&self) -> bool { + !self.write_buf.queue.has_remaining() + } + pub(crate) fn headers_buf(&mut self) -> &mut Vec { let buf = self.write_buf.headers_mut(); &mut buf.bytes From ea3e228287e714b97aa44c840a487abd3a915e15 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 15 Sep 2021 15:26:01 -0700 Subject: [PATCH 120/420] fix(server): convert panic to error if Connection::without_shutdown called on HTTP/2 conn --- src/error.rs | 13 +++++++++++++ src/server/conn.rs | 10 +++++----- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/src/error.rs b/src/error.rs index 5d350278f3..470a23b601 100644 --- a/src/error.rs +++ b/src/error.rs @@ -128,6 +128,10 @@ pub(super) enum User { #[cfg(feature = "http1")] ManualUpgrade, + /// User called `server::Connection::without_shutdown()` on an HTTP/2 conn. + #[cfg(feature = "server")] + WithoutShutdownNonHttp1, + /// User aborted in an FFI callback. 
#[cfg(feature = "ffi")] AbortedByCallback, @@ -355,6 +359,11 @@ impl Error { Error::new_user(User::Body).with(cause) } + #[cfg(feature = "server")] + pub(super) fn new_without_shutdown_not_h1() -> Error { + Error::new(Kind::User(User::WithoutShutdownNonHttp1)) + } + #[cfg(feature = "http1")] pub(super) fn new_shutdown(cause: std::io::Error) -> Error { Error::new(Kind::Shutdown).with(cause) @@ -449,6 +458,10 @@ impl Error { Kind::User(User::NoUpgrade) => "no upgrade available", #[cfg(feature = "http1")] Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use", + #[cfg(feature = "server")] + Kind::User(User::WithoutShutdownNonHttp1) => { + "without_shutdown() called on a non-HTTP/1 connection" + } #[cfg(feature = "ffi")] Kind::User(User::AbortedByCallback) => "operation aborted by an application callback", } diff --git a/src/server/conn.rs b/src/server/conn.rs index 5872033277..b488f8ba7e 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -741,10 +741,6 @@ where /// upgrade. Once the upgrade is completed, the connection would be "done", /// but it is not desired to actually shutdown the IO object. Instead you /// would take it back using `into_parts`. - /// - /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html) - /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) - /// to work with this function; or use the `without_shutdown` wrapper. pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> where S: Unpin, @@ -782,6 +778,10 @@ where /// Prevent shutdown of the underlying IO object at the end of service the request, /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. + /// + /// # Error + /// + /// This errors if the underlying connection protocol is not HTTP/1. pub fn without_shutdown(self) -> impl Future>> where S: Unpin, @@ -791,7 +791,7 @@ where let mut conn = Some(self); futures_util::future::poll_fn(move |cx| { ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; - Poll::Ready(Ok(conn.take().unwrap().into_parts())) + Poll::Ready(conn.take().unwrap().try_into_parts().ok_or_else(crate::Error::new_without_shutdown_not_h1)) }) } From a54689b921ca16dd0f29b3f4a74feae60218db34 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 20 Jun 2021 10:47:22 -0500 Subject: [PATCH 121/420] feat(ffi): add hyper_request_set_uri_parts Add a second FFI interface for setting the URI of a request with three separate schema, authority, and path/query strings, rather than one URI string. --- capi/include/hyper.h | 17 +++++++++++++++ src/ffi/http_types.rs | 48 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/capi/include/hyper.h b/capi/include/hyper.h index 2245d6a45d..7d79825865 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -430,6 +430,23 @@ enum hyper_code hyper_request_set_uri(struct hyper_request *req, const uint8_t *uri, size_t uri_len); +/* + Set the URI of the request with separate scheme, authority, and + path/query strings. + + Each of `scheme`, `authority`, and `path_and_query` should either be + null, to skip providing a component, or point to a UTF-8 encoded + string. If any string pointer argument is non-null, its corresponding + `len` parameter must be set to the string's length. 
+ */ +enum hyper_code hyper_request_set_uri_parts(struct hyper_request *req, + const uint8_t *scheme, + size_t scheme_len, + const uint8_t *authority, + size_t authority_len, + const uint8_t *path_and_query, + size_t path_and_query_len); + /* Set the preferred HTTP version of the request. diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index e7daaeba79..c80d67fd2b 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -90,6 +90,54 @@ ffi_fn! { } } +ffi_fn! { + /// Set the URI of the request with separate scheme, authority, and + /// path/query strings. + /// + /// Each of `scheme`, `authority`, and `path_and_query` should either be + /// null, to skip providing a component, or point to a UTF-8 encoded + /// string. If any string pointer argument is non-null, its corresponding + /// `len` parameter must be set to the string's length. + fn hyper_request_set_uri_parts( + req: *mut hyper_request, + scheme: *const u8, + scheme_len: size_t, + authority: *const u8, + authority_len: size_t, + path_and_query: *const u8, + path_and_query_len: size_t + ) -> hyper_code { + let mut builder = Uri::builder(); + if !scheme.is_null() { + let scheme_bytes = unsafe { + std::slice::from_raw_parts(scheme, scheme_len as usize) + }; + builder = builder.scheme(scheme_bytes); + } + if !authority.is_null() { + let authority_bytes = unsafe { + std::slice::from_raw_parts(authority, authority_len as usize) + }; + builder = builder.authority(authority_bytes); + } + if !path_and_query.is_null() { + let path_and_query_bytes = unsafe { + std::slice::from_raw_parts(path_and_query, path_and_query_len as usize) + }; + builder = builder.path_and_query(path_and_query_bytes); + } + match builder.build() { + Ok(u) => { + *unsafe { &mut *req }.0.uri_mut() = u; + hyper_code::HYPERE_OK + }, + Err(_) => { + hyper_code::HYPERE_INVALID_ARG + } + } + } +} + ffi_fn! { /// Set the preferred HTTP version of the request. /// From 949216f5839bc79d027f0ac4c1c4fb83a8f4cbd0 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 16 Sep 2021 10:00:38 -0700 Subject: [PATCH 122/420] docs(ffi): expand URI documentation --- capi/include/hyper.h | 13 +++++++++++++ src/ffi/http_types.rs | 13 +++++++++++++ 2 files changed, 26 insertions(+) diff --git a/capi/include/hyper.h b/capi/include/hyper.h index 7d79825865..efe5f06106 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -425,6 +425,19 @@ enum hyper_code hyper_request_set_method(struct hyper_request *req, /* Set the URI of the request. + + The request's URI is best described as the `request-target` from the RFCs. So in HTTP/1, + whatever is set will get sent as-is in the first line (GET $uri HTTP/1.1). It + supports the 4 defined variants, origin-form, absolute-form, authority-form, and + asterisk-form. + + The underlying type was built to efficiently support HTTP/2 where the request-target is + split over :scheme, :authority, and :path. As such, each part can be set explicitly, or the + type can parse a single contiguous string and if a scheme is found, that slot is "set". If + the string just starts with a path, only the path portion is set. All pseudo headers that + have been parsed/set are sent when the connection type is HTTP/2. + + To set each slot explicitly, use `hyper_request_set_uri_parts`. */ enum hyper_code hyper_request_set_uri(struct hyper_request *req, const uint8_t *uri, diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index c80d67fd2b..f6d32947bf 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -73,6 +73,19 @@ ffi_fn! 
{ ffi_fn! { /// Set the URI of the request. + /// + /// The request's URI is best described as the `request-target` from the RFCs. So in HTTP/1, + /// whatever is set will get sent as-is in the first line (GET $uri HTTP/1.1). It + /// supports the 4 defined variants, origin-form, absolute-form, authority-form, and + /// asterisk-form. + /// + /// The underlying type was built to efficiently support HTTP/2 where the request-target is + /// split over :scheme, :authority, and :path. As such, each part can be set explicitly, or the + /// type can parse a single contiguous string and if a scheme is found, that slot is "set". If + /// the string just starts with a path, only the path portion is set. All pseudo headers that + /// have been parsed/set are sent when the connection type is HTTP/2. + /// + /// To set each slot explicitly, use `hyper_request_set_uri_parts`. fn hyper_request_set_uri(req: *mut hyper_request, uri: *const u8, uri_len: size_t) -> hyper_code { let bytes = unsafe { std::slice::from_raw_parts(uri, uri_len as usize) From 6a876b8ed28ffa7daf04d8a9e642676a874e2668 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 16 Sep 2021 10:25:36 -0700 Subject: [PATCH 123/420] v0.14.13 --- CHANGELOG.md | 17 +++++++++++++++++ Cargo.toml | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46dfc851f1..85044dbe1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,20 @@ +### v0.14.13 (2021-09-16) + + +#### Bug Fixes + +* **client:** don't reuse a connection while still flushing ([c88011da](https://github.com/hyperium/hyper/commit/c88011da4ed5b5ca9107c4a2339a7ab054c5f27f)) +* **server:** convert panic to error if Connection::without_shutdown called on HTTP/2 conn ([ea3e2282](https://github.com/hyperium/hyper/commit/ea3e228287e714b97aa44c840a487abd3a915e15)) + + +#### Features + +* **ffi:** add hyper_request_set_uri_parts ([a54689b9](https://github.com/hyperium/hyper/commit/a54689b921ca16dd0f29b3f4a74feae60218db34)) +* **lib:** + * Export more things with Cargo features (server, !http1, !http2) ([0a4b56ac](https://github.com/hyperium/hyper/commit/0a4b56acb82ef41a3336f482b240c67c784c434f)) + * Export rt module independently of Cargo features ([cf6f62c7](https://github.com/hyperium/hyper/commit/cf6f62c71eda3b3a8732d86387e4ed8711cf9a86)) + + ### v0.14.12 (2021-08-24) diff --git a/Cargo.toml b/Cargo.toml index b171ca5a37..1fa5520e03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.12" +version = "0.14.13" description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" From e6b2cbda03ea794d8a9ed17c233e0743041beae2 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 16 Sep 2021 12:10:36 -0700 Subject: [PATCH 124/420] docs(dev): start a set of "dev" docs Initially this creates a top-level "dev" directory to hold documents pertaining to the development, as opposed to the usage, of hyper. For a first doc, it splits out the commit guidelines to its own file. cc #2586 --- CONTRIBUTING.md | 77 +------------------------------------------------ dev/COMMITS.md | 65 +++++++++++++++++++++++++++++++++++++++++ dev/README.md | 3 ++ 3 files changed, 69 insertions(+), 76 deletions(-) create mode 100644 dev/COMMITS.md create mode 100644 dev/README.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index eacb0e7013..ba4b4213f1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,79 +7,4 @@ You want to contribute? You're awesome! Don't know where to start? 
Check the [li
## Pull Requests

-When submitting a Pull Request, please have your commits follow these guidelines:
-
-### Git Commit Guidelines
-
-These guidelines have been copied from the [AngularJS](https://github.com/angular/angular.js/blob/master/CONTRIBUTING.md#-git-commit-guidelines)
-project.
-
-We have very precise rules over how our git commit messages can be formatted. This leads to **more
-readable messages** that are easy to follow when looking through the **project history**. But also,
-we use the git commit messages to **generate the change log**.
-
-#### Commit Message Format
-Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
-format that includes a **type**, a **scope** and a **subject**:
-
-```
-<type>(<scope>): <subject>
-<BLANK LINE>
-<body>
-<BLANK LINE>
-<footer>