diff --git a/Cargo.lock b/Cargo.lock index 910b2e940..72e444cfa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -124,12 +124,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" -dependencies = [ - "backtrace", -] +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "arrayref" @@ -349,12 +346,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "btparse" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "387e80962b798815a2b5c4bcfdb6bf626fa922ffe9f74e373103b858738e9f31" - [[package]] name = "bumpalo" version = "3.19.0" @@ -491,17 +482,6 @@ dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "color-backtrace" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2123a5984bd52ca861c66f66a9ab9883b27115c607f801f86c1bc2a84eb69f0f" -dependencies = [ - "backtrace", - "btparse", - "termcolor", -] - [[package]] name = "colorchoice" version = "1.0.4" @@ -1707,7 +1687,6 @@ dependencies = [ "irpc", "n0-error", "n0-future", - "n0-snafu", "nested_enum_utils", "postcard", "proptest", @@ -1721,7 +1700,6 @@ dependencies = [ "serde_json", "serde_test", "smallvec", - "snafu", "tempfile", "test-strategy", "testresult", @@ -2132,22 +2110,20 @@ dependencies = [ [[package]] name = "n0-error" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4839a11b62f1fdd75be912ee20634053c734c2240e867ded41c7f50822c549" +checksum = "c7d5969a2f40e9d9ed121a789c415f4114ac2b28e5731c080bdefee217d3b3fb" dependencies = [ - "derive_more 2.0.1", "n0-error-macros", "spez", ] [[package]] name = "n0-error-macros" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed2a7e5ca3cb5729d4a162d7bcab5b338bed299a2fee8457568d7e0a747ed89" +checksum = "9a6908df844696d9af91c7c3950d50e52d67df327d02a95367f95bbf177d6556" dependencies = [ - "heck", "proc-macro2", "quote", "syn 2.0.104", @@ -2174,19 +2150,6 @@ dependencies = [ "web-time", ] -[[package]] -name = "n0-snafu" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1815107e577a95bfccedb4cfabc73d709c0db6d12de3f14e0f284a8c5036dc4f" -dependencies = [ - "anyhow", - "btparse", - "color-backtrace", - "snafu", - "tracing-error", -] - [[package]] name = "n0-watcher" version = "0.5.0" @@ -2200,14 +2163,14 @@ dependencies = [ [[package]] name = "nested_enum_utils" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43fa9161ed44d30e9702fe42bd78693bceac0fed02f647da749f36109023d3a3" +checksum = "b1d5475271bdd36a4a2769eac1ef88df0f99428ea43e52dfd8b0ee5cb674695f" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.104", ] [[package]] @@ -3407,28 +3370,6 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fad6c857cbab2627dcf01ec85a623ca4e7dcb5691cbaa3d7fb7653671f0d09c9" -[[package]] -name = "snafu" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e84b3f4eacbf3a1ce05eac6763b4d629d60cbc94d632e4092c54ade71f1e1a2" -dependencies = [ - "backtrace", - "snafu-derive", -] - -[[package]] -name = "snafu-derive" -version = "0.8.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c97747dbf44bb1ca44a561ece23508e99cb592e862f22222dcf42f51d1e451" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.104", -] - [[package]] name = "socket2" version = "0.5.10" @@ -3655,15 +3596,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - [[package]] name = "test-strategy" version = "0.4.3" @@ -3962,16 +3894,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-error" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" -dependencies = [ - "tracing", - "tracing-subscriber", -] - [[package]] name = "tracing-log" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 52aff796e..035d7adf9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,17 +12,14 @@ keywords = ["hashing", "quic", "blake3", "streaming"] rust-version = "1.85" [dependencies] -anyhow = "1.0.95" bao-tree = { version = "0.16", features = ["experimental-mixed", "tokio_fsm", "validate", "serde"], default-features = false } bytes = { version = "1", features = ["serde"] } derive_more = { version = "2.0.1", features = ["from", "try_from", "into", "debug", "display", "deref", "deref_mut"] } futures-lite = "2.6.0" quinn = { package = "iroh-quinn", version = "0.14.0", optional = true } n0-future = "0.3.0" -n0-snafu = "0.2.2" range-collections = { version = "0.4.6", features = ["serde"] } smallvec = { version = "1", features = ["serde", "const_new"] } -snafu = "0.8.5" tokio = { version = "1.43.0", default-features = false, features = ["sync"] } tracing = "0.1.41" iroh-io = "0.6.1" @@ -32,7 +29,6 @@ serde = "1.0.217" postcard = { version = "1.1.1", features = ["experimental-derive", "use-std"] } data-encoding = "2.8.0" chrono = "0.4.39" -nested_enum_utils = "0.2.1" ref-cast = "1.0.24" arrayvec = "0.7.6" iroh = { version = "0.95", default-features = false } @@ -44,7 +40,8 @@ irpc = { version = "0.11.0", features = ["spans", "stream", "derive", "varint-ut iroh-metrics = { version = "0.37" } redb = { version = "2.6.3", optional = true } reflink-copy = { version = "0.1.24", optional = true } -n0-error = "0.1.0" +n0-error = "0.1.2" +nested_enum_utils = "0.2.3" [dev-dependencies] clap = { version = "4.5.31", features = ["derive"] } @@ -63,6 +60,7 @@ atomic_refcell = "0.1.13" iroh = { version = "0.95", features = ["discovery-local-network"]} async-compression = { version = "0.4.30", features = ["lz4", "tokio"] } concat_const = "0.2.0" +anyhow = "1.0.100" [build-dependencies] cfg_aliases = "0.2.1" diff --git a/README.md b/README.md index 0153f3269..5cd65002c 100644 --- a/README.md +++ b/README.md @@ -35,9 +35,10 @@ Here is a basic example of how to set up `iroh-blobs` with `iroh`: ```rust,no_run use iroh::{protocol::Router, Endpoint}; use iroh_blobs::{store::mem::MemStore, BlobsProtocol, ticket::BlobTicket}; +use n0_error::{Result, StdResultExt}; #[tokio::main] -async fn main() -> anyhow::Result<()> { +async fn main() -> Result<()> { // create an iroh endpoint that includes the standard discovery mechanisms // we've built at number0 let endpoint = Endpoint::bind().await?; @@ -45,7 +46,7 @@ async fn main() -> anyhow::Result<()> { // create a protocol handler using an in-memory blob store. 
let store = MemStore::new(); let tag = store.add_slice(b"Hello world").await?; - + let _ = endpoint.online().await; let addr = endpoint.addr(); let ticket = BlobTicket::new(addr, tag.hash, tag.format); @@ -62,7 +63,7 @@ async fn main() -> anyhow::Result<()> { tokio::signal::ctrl_c().await; // clean shutdown of router and store - router.shutdown().await?; + router.shutdown().await.anyerr()?; Ok(()) } ``` @@ -86,4 +87,4 @@ at your option. Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this project by you, as defined in the Apache-2.0 license, -shall be dual licensed as above, without any additional terms or conditions. \ No newline at end of file +shall be dual licensed as above, without any additional terms or conditions. diff --git a/src/api.rs b/src/api.rs index ec65a5c05..7c1d96854 100644 --- a/src/api.rs +++ b/src/api.rs @@ -16,12 +16,10 @@ use std::{io, ops::Deref}; use bao_tree::io::EncodeError; use iroh::Endpoint; -use n0_snafu::SpanTrace; -use nested_enum_utils::common_fields; +use n0_error::{e, stack_error}; use proto::{ShutdownRequest, SyncDbRequest}; use ref_cast::RefCast; use serde::{Deserialize, Serialize}; -use snafu::{Backtrace, IntoError, Snafu}; use tags::Tags; pub mod blobs; @@ -34,75 +32,74 @@ pub use crate::{store::util::Tag, util::temp_tag::TempTag}; pub(crate) type ApiClient = irpc::Client<proto::Request>; -#[common_fields({ - backtrace: Option<Backtrace>, - #[snafu(implicit)] - span_trace: SpanTrace, -})] #[allow(missing_docs)] #[non_exhaustive] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] pub enum RequestError { /// Request failed due to rpc error. - #[snafu(display("rpc error: {source}"))] + #[error("rpc error: {source}")] Rpc { source: irpc::Error }, /// Request failed due to an actual error. 
- #[snafu(display("inner error: {source}"))] - Inner { source: Error }, + #[error("inner error: {source}")] + Inner { + #[error(std_err)] + source: Error, + }, } impl From for RequestError { fn from(value: irpc::Error) -> Self { - RpcSnafu.into_error(value) + e!(RequestError::Rpc, value) } } impl From for RequestError { fn from(value: Error) -> Self { - InnerSnafu.into_error(value) + e!(RequestError::Inner, value) } } impl From for RequestError { fn from(value: io::Error) -> Self { - InnerSnafu.into_error(value.into()) + e!(RequestError::Inner, value.into()) } } impl From for RequestError { fn from(value: irpc::channel::mpsc::RecvError) -> Self { - RpcSnafu.into_error(value.into()) + e!(RequestError::Rpc, value.into()) } } pub type RequestResult = std::result::Result; -#[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: SpanTrace, -})] #[allow(missing_docs)] #[non_exhaustive] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, from_sources)] pub enum ExportBaoError { - #[snafu(display("send error: {source}"))] + #[error("send error")] Send { source: irpc::channel::SendError }, - #[snafu(display("mpsc recv error: {source}"))] + #[error("mpsc recv e api.acp.pro-channelsrror")] MpscRecv { source: irpc::channel::mpsc::RecvError, }, - #[snafu(display("oneshot recv error: {source}"))] + #[error("oneshot recv error")] OneshotRecv { source: irpc::channel::oneshot::RecvError, }, - #[snafu(display("request error: {source}"))] + #[error("request error")] Request { source: irpc::RequestError }, - #[snafu(display("io error: {source}"))] - ExportBaoIo { source: io::Error }, - #[snafu(display("encode error: {source}"))] - ExportBaoInner { source: bao_tree::io::EncodeError }, - #[snafu(display("client error: {source}"))] + #[error("io error")] + ExportBaoIo { + #[error(std_err)] + source: io::Error, + }, + #[error("encode error")] + ExportBaoInner { + #[error(std_err)] + source: bao_tree::io::EncodeError, + }, + #[error("client error")] ClientError { source: ProgressError }, } @@ -123,64 +120,23 @@ impl From for Error { impl From for ExportBaoError { fn from(e: irpc::Error) -> Self { match e { - irpc::Error::MpscRecv { source, .. } => MpscRecvSnafu.into_error(source), - irpc::Error::OneshotRecv { source, .. } => OneshotRecvSnafu.into_error(source), - irpc::Error::Send { source, .. } => SendSnafu.into_error(source), - irpc::Error::Request { source, .. } => RequestSnafu.into_error(source), + irpc::Error::MpscRecv { source: e, .. } => e!(ExportBaoError::MpscRecv, e), + irpc::Error::OneshotRecv { source: e, .. } => e!(ExportBaoError::OneshotRecv, e), + irpc::Error::Send { source: e, .. } => e!(ExportBaoError::Send, e), + irpc::Error::Request { source: e, .. } => e!(ExportBaoError::Request, e), #[cfg(feature = "rpc")] - irpc::Error::Write { source, .. } => ExportBaoIoSnafu.into_error(source.into()), + irpc::Error::Write { source: e, .. 
} => e!(ExportBaoError::ExportBaoIo, e.into()), } } } -impl From<io::Error> for ExportBaoError { - fn from(value: io::Error) -> Self { - ExportBaoIoSnafu.into_error(value) - } -} - -impl From<irpc::channel::mpsc::RecvError> for ExportBaoError { - fn from(value: irpc::channel::mpsc::RecvError) -> Self { - MpscRecvSnafu.into_error(value) - } -} - -impl From<irpc::channel::oneshot::RecvError> for ExportBaoError { - fn from(value: irpc::channel::oneshot::RecvError) -> Self { - OneshotRecvSnafu.into_error(value) - } -} - -impl From<irpc::channel::SendError> for ExportBaoError { - fn from(value: irpc::channel::SendError) -> Self { - SendSnafu.into_error(value) - } -} - -impl From<irpc::RequestError> for ExportBaoError { - fn from(value: irpc::RequestError) -> Self { - RequestSnafu.into_error(value) - } -} - -impl From<bao_tree::io::EncodeError> for ExportBaoError { - fn from(value: bao_tree::io::EncodeError) -> Self { - ExportBaoInnerSnafu.into_error(value) - } -} - -impl From<ProgressError> for ExportBaoError { - fn from(value: ProgressError) -> Self { - ClientSnafu.into_error(value) - } -} - pub type ExportBaoResult<T> = std::result::Result<T, ExportBaoError>; -#[derive(Debug, derive_more::Display, derive_more::From, Serialize, Deserialize)] +#[derive(Serialize, Deserialize)] +#[stack_error(derive, std_sources, from_sources)] pub enum Error { #[serde(with = "crate::util::serde::io_error_serde")] - Io(io::Error), + Io(#[error(source)] io::Error), } impl Error { @@ -239,14 +195,6 @@ impl From for Error { } } -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Io(e) => Some(e), - } - } -} - impl From<EncodeError> for Error { fn from(value: EncodeError) -> Self { match value { diff --git a/src/api/blobs.rs b/src/api/blobs.rs index 82233e711..a79395a74 100644 --- a/src/api/blobs.rs +++ b/src/api/blobs.rs @@ -25,6 +25,7 @@ use bytes::Bytes; use genawaiter::sync::Gen; use iroh_io::AsyncStreamWriter; use irpc::channel::{mpsc, oneshot}; +use n0_error::AnyError; use n0_future::{future, stream, Stream, StreamExt}; use range_collections::{range_set::RangeSetRange, RangeSet2}; use ref_cast::RefCast; @@ -113,7 +114,7 @@ impl Blobs { /// use iroh_blobs::{store::mem::MemStore, api::blobs::Blobs}; /// use tokio::io::AsyncReadExt; /// - /// # async fn example() -> anyhow::Result<()> { + /// # async fn example() -> n0_error::Result<()> { /// let store = MemStore::new(); /// let tag = store.add_slice(b"Hello, world!").await?; /// let mut reader = store.reader(tag.hash); @@ -291,7 +292,7 @@ impl Blobs { sender.send(ImportByteStreamUpdate::Bytes(item?)).await?; } sender.send(ImportByteStreamUpdate::Done).await?; - anyhow::Ok(()) + n0_error::Ok(()) }; let _ = tokio::join!(send, recv); }); @@ -972,14 +973,14 @@ impl ExportBaoProgress { /// to get all non-corrupted sections. pub fn hashes_with_index( self, - ) -> impl Stream<Item = anyhow::Result<(u64, Hash)>> { + ) -> impl Stream<Item = n0_error::Result<(u64, Hash)>> { let mut stream = self.stream(); Gen::new(|co| async move { while let Some(item) = stream.next().await { let leaf = match item { EncodedItem::Leaf(leaf) => leaf, EncodedItem::Error(e) => { - co.yield_(Err(e.into())).await; + co.yield_(Err(AnyError::from_std(e))).await; continue; } _ => continue, }; @@ -1000,7 +1001,7 @@ } /// Same as [`Self::hashes_with_index`], but without the indexes. 
- pub fn hashes(self) -> impl Stream<Item = anyhow::Result<Hash>> { + pub fn hashes(self) -> impl Stream<Item = n0_error::Result<Hash>> { self.hashes_with_index().map(|x| x.map(|(_, hash)| hash)) } diff --git a/src/api/downloader.rs b/src/api/downloader.rs index 9f5bfbc2d..d3bd19770 100644 --- a/src/api/downloader.rs +++ b/src/api/downloader.rs @@ -6,10 +6,10 @@ use std::{ sync::Arc, }; -use anyhow::bail; use genawaiter::sync::Gen; use iroh::{Endpoint, EndpointId}; use irpc::{channel::mpsc, rpc_requests}; +use n0_error::{anyerr, Result}; use n0_future::{future, stream, task::JoinSet, BufferedStreamExt, Stream, StreamExt}; use rand::seq::SliceRandom; use serde::{de::Error, Deserialize, Serialize}; @@ -47,7 +47,7 @@ struct DownloaderActor { #[derive(Debug, Serialize, Deserialize)] pub enum DownloadProgressItem { #[serde(skip)] - Error(anyhow::Error), + Error(n0_error::AnyError), TryProvider { id: EndpointId, request: Arc<GetRequest>, @@ -106,7 +106,7 @@ async fn handle_download_impl( pool: ConnectionPool, request: DownloadRequest, tx: &mut mpsc::Sender<DownloadProgressItem>, -) -> anyhow::Result<()> { +) -> Result<()> { match request.strategy { SplitStrategy::Split => handle_download_split_impl(store, pool, request, tx).await?, SplitStrategy::None => match request.request { @@ -127,7 +127,7 @@ async fn handle_download_split_impl( pool: ConnectionPool, request: DownloadRequest, tx: &mut mpsc::Sender<DownloadProgressItem>, -) -> anyhow::Result<()> { +) -> Result<()> { let providers = request.providers; let requests = split_request(&request.request, &providers, &pool, &store, Drain).await?; let (progress_tx, progress_rx) = tokio::sync::mpsc::channel(32); @@ -313,14 +313,16 @@ impl DownloadProgress { }))) } - async fn complete(self) -> anyhow::Result<()> { + async fn complete(self) -> Result<()> { let rx = self.fut.await?; let stream = rx.into_stream(); tokio::pin!(stream); while let Some(item) = stream.next().await { match item? { DownloadProgressItem::Error(e) => Err(e)?, - DownloadProgressItem::DownloadError => anyhow::bail!("Download error"), + DownloadProgressItem::DownloadError => { + n0_error::bail_any!("Download error"); + } _ => {} } } @@ -329,7 +331,7 @@ } impl IntoFuture for DownloadProgress { - type Output = anyhow::Result<()>; + type Output = Result<()>; type IntoFuture = future::Boxed<Self::Output>; fn into_future(self) -> Self::IntoFuture { @@ -372,7 +374,7 @@ async fn split_request<'a>( request: &FiniteRequest, providers: &Arc<dyn ContentDiscovery>, pool: &ConnectionPool, store: &Store, progress: impl Sink<DownloadProgressItem>, -) -> anyhow::Result<Box<dyn Iterator<Item = GetRequest> + Send + 'a>> { +) -> Result<Box<dyn Iterator<Item = GetRequest> + Send + 'a>> { Ok(match request { FiniteRequest::Get(req) => { let Some(_first) = req.ranges.iter_infinite().next() else { @@ -381,7 +383,7 @@ let first = GetRequest::blob(req.hash); execute_get(pool, Arc::new(first), providers, store, progress).await?; let size = store.observe(req.hash).await?.size(); - anyhow::ensure!(size % 32 == 0, "Size is not a multiple of 32"); + n0_error::ensure_any!(size % 32 == 0, "Size is not a multiple of 32"); let n = size / 32; Box::new( req.ranges @@ -428,7 +430,7 @@ async fn execute_get( providers: &Arc<dyn ContentDiscovery>, store: &Store, mut progress: impl Sink<DownloadProgressItem>, -) -> anyhow::Result<()> { +) -> Result<()> { let remote = store.remote(); let mut providers = providers.find_providers(request.content()); while let Some(provider) = providers.next().await { @@ -480,7 +482,7 @@ } } } - bail!("Unable to download {}", request.hash); + Err(anyerr!("Unable to download {}", request.hash)) } /// Trait for pluggable content discovery strategies. 
diff --git a/src/api/remote.rs b/src/api/remote.rs index a71b5c001..527f7588b 100644 --- a/src/api/remote.rs +++ b/src/api/remote.rs @@ -15,11 +15,9 @@ use bao_tree::{ use genawaiter::sync::{Co, Gen}; use iroh::endpoint::Connection; use irpc::util::{AsyncReadVarintExt, WriteVarintExt}; +use n0_error::{e, stack_error, AnyError, Result, StdResultExt}; use n0_future::{io, Stream, StreamExt}; -use n0_snafu::SpanTrace; -use nested_enum_utils::common_fields; use ref_cast::RefCast; -use snafu::{Backtrace, IntoError, ResultExt, Snafu}; use tracing::{debug, trace}; use super::blobs::{Bitfield, ExportBaoOptions}; @@ -34,7 +32,6 @@ use crate::{ AtBlobHeader, AtConnected, AtEndBlob, BlobContentNext, ConnectedNext, DecodeError, EndBlobNext, }, - get_error::{BadRequestSnafu, LocalFailureSnafu}, GetError, GetResult, Stats, StreamPair, }, hashseq::{HashSeq, HashSeqIter}, @@ -124,7 +121,10 @@ impl GetProgress { pub async fn complete(self) -> GetResult<Stats> { just_result(self.stream()).await.unwrap_or_else(|| { - Err(LocalFailureSnafu.into_error(anyhow::anyhow!("stream closed without result"))) + Err(e!( + GetError::LocalFailure, + n0_error::anyerr!("stream closed without result") + )) }) } } @@ -136,11 +136,11 @@ pub enum PushProgressItem { /// The request was completed. Done(Stats), /// The request was closed, but not completed. - Error(anyhow::Error), + Error(AnyError), } -impl From<anyhow::Result<Stats>> for PushProgressItem { - fn from(res: anyhow::Result<Stats>) -> Self { +impl From<Result<Stats>> for PushProgressItem { + fn from(res: Result<Stats>) -> Self { match res { Ok(stats) => Self::Done(stats), Err(e) => Self::Error(e), @@ -148,7 +148,7 @@ } } -impl TryFrom<PushProgressItem> for anyhow::Result<Stats> { +impl TryFrom<PushProgressItem> for Result<Stats> { type Error = &'static str; fn try_from(item: PushProgressItem) -> Result<Self, Self::Error> { @@ -166,7 +166,7 @@ pub struct PushProgress { } impl IntoFuture for PushProgress { - type Output = anyhow::Result<Stats>; + type Output = Result<Stats>; type IntoFuture = n0_future::boxed::BoxFuture<Self::Output>; fn into_future(self) -> n0_future::boxed::BoxFuture<Self::Output> { @@ -179,10 +179,10 @@ impl PushProgress { into_stream(self.rx, self.fut) } - pub async fn complete(self) -> anyhow::Result<Stats> { + pub async fn complete(self) -> Result<Stats> { just_result(self.stream()) .await - .unwrap_or_else(|| Err(anyhow::anyhow!("stream closed without result"))) + .unwrap_or_else(|| Err(n0_error::anyerr!("stream closed without result"))) } } @@ -441,7 +441,7 @@ impl Remote { pub async fn local_for_request( &self, request: impl Into<Arc<GetRequest>>, - ) -> anyhow::Result<LocalInfo> { + ) -> Result<LocalInfo> { let request = request.into(); let root = request.hash; let bitfield = self.store().observe(root).await?; @@ -494,7 +494,7 @@ } /// Get the local info for a given blob or hash sequence, at the present time. 
- pub async fn local(&self, content: impl Into<HashAndFormat>) -> anyhow::Result<LocalInfo> { + pub async fn local(&self, content: impl Into<HashAndFormat>) -> Result<LocalInfo> { let request = GetRequest::from(content.into()); self.local_for_request(request).await } @@ -536,7 +536,7 @@ let local = self .local(content) .await - .map_err(|e: anyhow::Error| LocalFailureSnafu.into_error(e))?; + .map_err(|e| e!(GetError::LocalFailure, e))?; if local.is_complete() { return Ok(Default::default()); } @@ -599,16 +599,16 @@ conn: Connection, request: PushRequest, progress: impl Sink<u64>, - ) -> anyhow::Result<Stats> { + ) -> Result<Stats> { let hash = request.hash; debug!(%hash, "pushing"); - let (mut send, mut recv) = conn.open_bi().await?; + let (mut send, mut recv) = conn.open_bi().await.anyerr()?; let mut context = StreamContext { payload_bytes_sent: 0, sender: progress, }; // we are not going to need this! - recv.stop(0u32.into())?; + recv.stop(0u32.into()).anyerr()?; // write the request. Unlike for reading, we can just serialize it sync using postcard. let request = write_push_request(request, &mut send).await?; let mut request_ranges = request.ranges.iter_infinite(); @@ -622,7 +622,7 @@ } if request.ranges.is_blob() { // we are done - send.finish()?; + send.finish().anyerr()?; return Ok(Default::default()); } let hash_seq = self.store().get_bytes(root).await?; @@ -637,7 +637,7 @@ .await?; } } - send.finish()?; + send.finish().anyerr()?; Ok(Default::default()) } @@ -681,7 +681,10 @@ let store = self.store(); let root = request.hash; let conn = conn.open_stream_pair().await.map_err(|e| { - LocalFailureSnafu.into_error(anyhow::anyhow!("failed to open stream pair: {e}")) + e!( + GetError::LocalFailure, + n0_error::anyerr!("failed to open stream pair: {e}") + ) })?; // I am cloning the connection, but it's fine because the original connection or ConnectionRef stays alive // for the duration of the operation. @@ -689,7 +692,11 @@ AtConnected::new(conn.t0, conn.recv, conn.send, request, Default::default()); trace!("Getting header"); // read the header - let next_child = match connected.next().await? { + let next_child = match connected + .next() + .await + .map_err(|e| e!(GetError::ConnectedNext, e))? 
+ { ConnectedNext::StartRoot(at_start_root) => { let header = at_start_root.next(); let end = get_blob_ranges_impl(header, root, store, &mut progress).await?; match end.next() { @@ -709,9 +716,9 @@ store .get_bytes(root) .await - .map_err(|e| LocalFailureSnafu.into_error(e.into()))?, + .map_err(|e| e!(GetError::LocalFailure, e.into()))?, ) - .context(BadRequestSnafu)?; + .map_err(|e| e!(GetError::BadRequest, e))?; // let mut hash_seq = LazyHashSeq::new(store.blobs().clone(), root); loop { let at_start_child = match next_child { @@ -734,7 +741,10 @@ Err(at_closing) => at_closing, }; // read the rest, if any - let stats = at_closing.next().await?; + let stats = at_closing + .next() + .await + .map_err(|e| e!(GetError::AtClosingNext, e))?; trace!(?stats, "get hash seq done"); Ok(stats) } @@ -796,51 +806,57 @@ Err(at_closing) => at_closing, }; // read the rest, if any - let stats = at_closing.next().await?; + let stats = at_closing + .next() + .await + .map_err(|e| e!(GetError::AtClosingNext, e))?; trace!(?stats, "get hash seq done"); Ok(stats) } } /// Failures for a get operation -#[common_fields({ - backtrace: Option<Backtrace>, - #[snafu(implicit)] - span_trace: SpanTrace, -})] #[allow(missing_docs)] #[non_exhaustive] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta)] pub enum ExecuteError { /// Network or IO operation failed. - #[snafu(display("Unable to open bidi stream"))] + #[error("Unable to open bidi stream")] Connection { + #[error(std_err)] source: iroh::endpoint::ConnectionError, }, - #[snafu(display("Unable to read from the remote"))] - Read { source: iroh::endpoint::ReadError }, - #[snafu(display("Error sending the request"))] + #[error("Unable to read from the remote")] Read { + #[error(std_err)] + source: iroh::endpoint::ReadError, + }, + #[error("Error sending the request")] Send { + #[error(std_err)] source: crate::get::fsm::ConnectedNextError, }, - #[snafu(display("Unable to read size"))] + #[error("Unable to read size")] Size { + #[error(std_err)] source: crate::get::fsm::AtBlobHeaderNextError, }, - #[snafu(display("Error while decoding the data"))] + #[error("Error while decoding the data")] Decode { + #[error(std_err)] source: crate::get::fsm::DecodeError, }, - #[snafu(display("Internal error while reading the hash sequence"))] + #[error("Internal error while reading the hash sequence")] ExportBao { source: api::ExportBaoError }, - #[snafu(display("Hash sequence has an invalid length"))] - InvalidHashSeq { source: anyhow::Error }, - #[snafu(display("Internal error importing the data"))] + #[error("Hash sequence has an invalid length")] + InvalidHashSeq { source: AnyError }, + #[error("Internal error importing the data")] ImportBao { source: crate::api::RequestError }, - #[snafu(display("Error sending download progress - receiver closed"))] + #[error("Error sending download progress - receiver closed")] SendDownloadProgress { source: irpc::channel::SendError }, - #[snafu(display("Internal error importing the data"))] + #[error("Internal error importing the data")] MpscSend { + #[error(std_err)] source: tokio::sync::mpsc::error::SendError, }, } @@ -877,13 +893,19 @@ async fn get_blob_ranges_impl( store: &Store, mut progress: impl Sink<u64>, ) -> GetResult<AtEndBlob> { - let (mut content, size) = header.next().await?; + let (mut content, size) = header + .next() + .await + .map_err(|e| e!(GetError::AtBlobHeaderNext, e))?; let Some(size) = NonZeroU64::new(size) else { return if hash == Hash::EMPTY { - let end = content.drain().await?; + let end = 
content.drain().await.map_err(|e| e!(GetError::Decode, e))?; Ok(end) } else { - Err(DecodeError::leaf_hash_mismatch(ChunkNum(0)).into()) + Err(e!( + GetError::Decode, + DecodeError::leaf_hash_mismatch(ChunkNum(0)) + )) }; }; let buffer_size = get_buffer_size(size); @@ -891,17 +913,21 @@ let handle = store .import_bao(hash, size, buffer_size) .await - .map_err(|e| LocalFailureSnafu.into_error(e.into()))?; + .map_err(|e| e!(GetError::LocalFailure, e.into()))?; let write = async move { GetResult::Ok(loop { match content.next().await { BlobContentNext::More((next, res)) => { - let item = res?; + let item = res.map_err(|e| e!(GetError::Decode, e))?; progress .send(next.stats().payload_bytes_read) .await - .map_err(|e| LocalFailureSnafu.into_error(e.into()))?; - handle.tx.send(item).await?; + .map_err(|e| e!(GetError::LocalFailure, e.into()))?; + handle + .tx + .send(item) + .await + .map_err(|e| e!(GetError::IrpcSend, e))?; content = next; } BlobContentNext::Done(end) => { @@ -913,7 +939,10 @@ }; let complete = async move { handle.rx.await.map_err(|e| { - LocalFailureSnafu.into_error(anyhow::anyhow!("error reading from import stream: {e}")) + e!( + GetError::LocalFailure, + n0_error::anyerr!("error reading from import stream: {e}") + ) }) }; let (_, end) = tokio::try_join!(complete, write)?; @@ -936,7 +965,7 @@ pub(crate) struct HashSeqChunk { } impl TryFrom<Leaf> for HashSeqChunk { - type Error = anyhow::Error; + type Error = AnyError; fn try_from(leaf: Leaf) -> Result<Self, Self::Error> { let offset = leaf.offset; @@ -983,7 +1012,7 @@ impl LazyHashSeq { } #[allow(dead_code)] - pub async fn get_from_offset(&mut self, offset: u64) -> anyhow::Result<Option<Hash>> { + pub async fn get_from_offset(&mut self, offset: u64) -> Result<Option<Hash>> { if offset == 0 { Ok(Some(self.hash)) } else { @@ -992,7 +1021,7 @@ } #[allow(dead_code)] - pub async fn get(&mut self, child_offset: u64) -> anyhow::Result<Option<Hash>> { + pub async fn get(&mut self, child_offset: u64) -> Result<Option<Hash>> { // check if we have the hash in the current chunk if let Some(chunk) = &self.current_chunk { if let Some(hash) = chunk.get(child_offset) { @@ -1015,7 +1044,7 @@ async fn write_push_request( request: PushRequest, stream: &mut impl SendStream, -) -> anyhow::Result<PushRequest> { +) -> Result<PushRequest> { let mut request_bytes = Vec::new(); request_bytes.push(RequestType::Push as u8); request_bytes.write_length_prefixed(&request).unwrap(); @@ -1053,7 +1082,7 @@ where self.sender .send(self.payload_bytes_sent) .await - .map_err(|e| ProgressError::Internal { source: e.into() })?; + .map_err(|e| n0_error::e!(ProgressError::Internal, e.into()))?; Ok(()) } diff --git a/src/format/collection.rs b/src/format/collection.rs index fd8884fd9..b3cb5ec2d 100644 --- a/src/format/collection.rs +++ b/src/format/collection.rs @@ -1,9 +1,10 @@ //! The collection type used by iroh use std::{collections::BTreeMap, future::Future}; -use anyhow::Context; +// n0_error::Context is no longer exported; use explicit mapping instead. 
use bao_tree::blake3; use bytes::Bytes; +use n0_error::{Result, StdResultExt}; use serde::{Deserialize, Serialize}; use crate::{ @@ -66,11 +67,11 @@ impl IntoIterator for Collection { /// A simple store trait for loading blobs pub trait SimpleStore { /// Load a blob from the store - fn load(&self, hash: Hash) -> impl Future<Output = anyhow::Result<Bytes>> + Send + '_; + fn load(&self, hash: Hash) -> impl Future<Output = Result<Bytes>> + Send + '_; } impl SimpleStore for crate::api::Store { - async fn load(&self, hash: Hash) -> anyhow::Result<Bytes> { + async fn load(&self, hash: Hash) -> Result<Bytes> { Ok(self.get_bytes(hash).await?) } } @@ -115,23 +116,26 @@ impl Collection { /// the links array, and the collection. pub async fn read_fsm( fsm_at_start_root: fsm::AtStartRoot, ) -> Result<(fsm::EndBlobNext, HashSeq, Collection)> { let (next, links) = { let curr = fsm_at_start_root.next(); let (curr, data) = curr.concatenate_into_vec().await?; - let links = HashSeq::new(data.into()).context("links could not be parsed")?; + let links = HashSeq::new(data.into()) + .ok_or_else(|| n0_error::anyerr!("links could not be parsed"))?; (curr.next(), links) }; let fsm::EndBlobNext::MoreChildren(at_meta) = next else { - anyhow::bail!("expected meta"); + n0_error::bail_any!("expected meta"); }; let (next, collection) = { let mut children = links.clone(); - let meta_link = children.pop_front().context("meta link not found")?; + let meta_link = children + .pop_front() + .ok_or_else(|| n0_error::anyerr!("meta link not found"))?; let curr = at_meta.next(meta_link); let (curr, names) = curr.concatenate_into_vec().await?; - let names = postcard::from_bytes::<CollectionMeta>(&names)?; - anyhow::ensure!( + let names = postcard::from_bytes::<CollectionMeta>(&names).anyerr()?; + n0_error::ensure_any!( names.header == *Self::HEADER, "expected header {:?}, got {:?}", Self::HEADER, @@ -148,7 +152,7 @@ /// Returns the collection, a map from blob offsets to bytes, and the stats. pub async fn read_fsm_all( fsm_at_start_root: crate::get::fsm::AtStartRoot, - ) -> anyhow::Result<(Collection, BTreeMap<u64, Bytes>, Stats)> { + ) -> Result<(Collection, BTreeMap<u64, Bytes>, Stats)> { let (next, links, collection) = Self::read_fsm(fsm_at_start_root).await?; let mut res = BTreeMap::new(); let mut curr = next; @@ -156,7 +160,7 @@ match curr { fsm::EndBlobNext::MoreChildren(more) => { let child_offset = more.offset() - 1; - let Some(hash) = links.get(usize::try_from(child_offset)?) else { + let Some(hash) = links.get(usize::try_from(child_offset).anyerr()?) else { break more.finish(); }; let header = more.next(hash); @@ -172,13 +176,16 @@ } /// Create a new collection from a hash sequence and metadata. - pub async fn load(root: Hash, store: &impl SimpleStore) -> anyhow::Result<Self> { + pub async fn load(root: Hash, store: &impl SimpleStore) -> Result<Self> { let hs = store.load(root).await?; let hs = HashSeq::try_from(hs)?; - let meta_hash = hs.iter().next().context("empty hash seq")?; + let meta_hash = hs + .iter() + .next() + .ok_or_else(|| n0_error::anyerr!("empty hash seq"))?; let meta = store.load(meta_hash).await?; - let meta: CollectionMeta = postcard::from_bytes(&meta)?; - anyhow::ensure!( + let meta: CollectionMeta = postcard::from_bytes(&meta).anyerr()?; + n0_error::ensure_any!( meta.names.len() + 1 == hs.len(), "names and links length mismatch" ); @@ -187,9 +194,9 @@ /// Store a collection in a store. Returns the root hash of the collection /// as a TempTag. 
- pub async fn store(self, db: &Store) -> anyhow::Result<TempTag> { + pub async fn store(self, db: &Store) -> Result<TempTag> { let (links, meta) = self.into_parts(); - let meta_bytes = postcard::to_stdvec(&meta)?; + let meta_bytes = postcard::to_stdvec(&meta).anyerr()?; let meta_tag = db.add_bytes(meta_bytes).temp_tag().await?; let links_bytes = std::iter::once(meta_tag.hash()) .chain(links) @@ -257,6 +264,8 @@ impl Collection { #[cfg(test)] mod tests { + use n0_error::{Result, StackResultExt}; + use super::*; #[test] @@ -322,7 +331,7 @@ } impl SimpleStore for TestStore { - async fn load(&self, hash: Hash) -> anyhow::Result<Bytes> { + async fn load(&self, hash: Hash) -> Result<Bytes> { self.0.get(&hash).cloned().context("not found") } } diff --git a/src/get.rs b/src/get.rs index d9c59b034..25e74c66d 100644 --- a/src/get.rs +++ b/src/get.rs @@ -21,14 +21,11 @@ use std::{ time::Duration, }; -use anyhow::Result; use bao_tree::{io::fsm::BaoContentItem, ChunkNum}; use fsm::RequestCounters; +use n0_error::Result; use n0_future::time::Instant; -use n0_snafu::SpanTrace; -use nested_enum_utils::common_fields; use serde::{Deserialize, Serialize}; -use snafu::{Backtrace, IntoError, ResultExt, Snafu}; use tracing::{debug, error}; use crate::{ @@ -40,7 +37,6 @@ use crate::{ mod error; pub mod request; -pub(crate) use error::get_error; pub use error::{GetError, GetResult}; type DefaultReader = iroh::endpoint::RecvStream; @@ -119,10 +115,10 @@ pub mod fsm { use derive_more::From; use iroh::endpoint::Connection; use iroh_io::AsyncSliceWriter; + use n0_error::{e, stack_error, AnyError}; use super::*; use crate::{ - get::get_error::BadRequestSnafu, protocol::{ GetManyRequest, GetRequest, NonEmptyRequestRangeSpecIter, Request, MAX_MESSAGE_SIZE, }, @@ -156,14 +152,14 @@ let (mut writer, reader) = connection .open_bi() .await - .map_err(|e| OpenSnafu.into_error(e.into()))?; + .map_err(|e| e!(InitialNextError::Open, e.into()))?; let request = Request::GetMany(request); let request_bytes = postcard::to_stdvec(&request) - .map_err(|source| BadRequestSnafu.into_error(source.into()))?; + .map_err(|source| e!(GetError::BadRequest, AnyError::from_std(source)))?; writer .send_bytes(request_bytes.into()) .await - .context(connected_next_error::WriteSnafu)?; + .map_err(|source| e!(ConnectedNextError::Write, source))?; let Request::GetMany(request) = request else { unreachable!(); }; @@ -247,7 +243,7 @@ .connection .open_bi() .await - .map_err(|e| OpenSnafu.into_error(e.into()))?; + .map_err(|e| e!(InitialNextError::Open, e.into()))?; Ok(AtConnected { start, reader, @@ -258,17 +254,14 @@ } } - /// Error that you can get from [`AtConnected::next`] - #[common_fields({ - backtrace: Option<Backtrace>, - #[snafu(implicit)] - span_trace: SpanTrace, - })] - #[allow(missing_docs)] - #[derive(Debug, Snafu)] - #[non_exhaustive] + /// Error that you can get from [`AtInitial::next`] + #[stack_error(derive, add_meta, from_sources)] pub enum InitialNextError { - Open { source: io::Error }, + #[error("open: {source}")] + Open { + #[error(std_err)] + source: io::Error, + }, } /// State of the get response machine after the handshake has been sent @@ -293,25 +286,23 @@ } /// Error that you can get from [`AtConnected::next`] - #[common_fields({ - backtrace: Option<Backtrace>, - #[snafu(implicit)] - span_trace: SpanTrace, - })] - #[allow(missing_docs)] - #[derive(Debug, Snafu)] - #[snafu(module)] - #[non_exhaustive] + #[stack_error(derive, add_meta)] pub enum ConnectedNextError { /// Error when serializing the request - 
#[snafu(display("postcard ser: {source}"))] - PostcardSer { source: postcard::Error }, + #[error("postcard ser: {source}")] + PostcardSer { + #[error(std_err)] + source: postcard::Error, + }, /// The serialized request is too long to be sent - #[snafu(display("request too big"))] + #[error("request too big")] RequestTooBig {}, /// Error when writing the request to the [`SendStream`]. - #[snafu(display("write: {source}"))] - Write { source: io::Error }, + #[error("write: {source}")] + Write { + #[error(std_err)] + source: io::Error, + }, } impl AtConnected { @@ -350,14 +341,14 @@ pub mod fsm { debug!("sending request"); let wrapped = Request::Get(request); let request_bytes = postcard::to_stdvec(&wrapped) - .context(connected_next_error::PostcardSerSnafu)?; + .map_err(|source| e!(ConnectedNextError::PostcardSer, source))?; let Request::Get(x) = wrapped else { unreachable!(); }; request = x; if request_bytes.len() > MAX_MESSAGE_SIZE { - return Err(connected_next_error::RequestTooBigSnafu.build()); + return Err(e!(ConnectedNextError::RequestTooBig)); } // write the request itself @@ -365,11 +356,11 @@ pub mod fsm { writer .send_bytes(request_bytes.into()) .await - .context(connected_next_error::WriteSnafu)?; + .map_err(|source| e!(ConnectedNextError::Write, source))?; writer .sync() .await - .context(connected_next_error::WriteSnafu)?; + .map_err(|source| e!(ConnectedNextError::Write, source))?; len }; @@ -503,23 +494,19 @@ pub mod fsm { } /// Error that you can get from [`AtBlobHeader::next`] - #[common_fields({ - backtrace: Option, - #[snafu(implicit)] - span_trace: SpanTrace, - })] - #[non_exhaustive] - #[derive(Debug, Snafu)] - #[snafu(module)] + #[stack_error(derive, add_meta)] pub enum AtBlobHeaderNextError { /// Eof when reading the size header /// /// This indicates that the provider does not have the requested data. - #[snafu(display("not found"))] + #[error("not found")] NotFound {}, /// Generic io error - #[snafu(display("io: {source}"))] - Read { source: io::Error }, + #[error("io: {source}")] + Read { + #[error(std_err)] + source: io::Error, + }, } impl From for io::Error { @@ -539,9 +526,9 @@ pub mod fsm { let mut size = [0; 8]; self.reader.recv_exact(&mut size).await.map_err(|cause| { if cause.kind() == io::ErrorKind::UnexpectedEof { - at_blob_header_next_error::NotFoundSnafu.build() + e!(AtBlobHeaderNextError::NotFound) } else { - at_blob_header_next_error::ReadSnafu.into_error(cause) + e!(AtBlobHeaderNextError::Read, cause) } })?; self.misc.other_bytes_read += 8; @@ -646,51 +633,49 @@ pub mod fsm { /// variants indicate that the provider has sent us invalid data. A well-behaved /// provider should never do this, so this is an indication that the provider is /// not behaving correctly. 
- #[common_fields({ - backtrace: Option<Backtrace>, - #[snafu(implicit)] - span_trace: SpanTrace, - })] #[non_exhaustive] - #[derive(Debug, Snafu)] - #[snafu(module)] + #[stack_error(derive, add_meta)] pub enum DecodeError { /// A chunk was not found or invalid, so the provider stopped sending data - #[snafu(display("not found"))] + #[error("not found")] ChunkNotFound {}, /// A parent was not found or invalid, so the provider stopped sending data - #[snafu(display("parent not found {node:?}"))] + #[error("parent not found {node:?}")] ParentNotFound { node: TreeNode }, /// A leaf was not found or invalid, so the provider stopped sending data - #[snafu(display("chunk not found {num}"))] + #[error("chunk not found {num}")] LeafNotFound { num: ChunkNum }, /// The hash of a parent did not match the expected hash - #[snafu(display("parent hash mismatch: {node:?}"))] + #[error("parent hash mismatch: {node:?}")] ParentHashMismatch { node: TreeNode }, /// The hash of a leaf did not match the expected hash - #[snafu(display("leaf hash mismatch: {num}"))] + #[error("leaf hash mismatch: {num}")] LeafHashMismatch { num: ChunkNum }, /// Error when reading from the stream - #[snafu(display("read: {source}"))] - Read { source: io::Error }, + #[error("read: {source}")] + Read { + #[error(std_err)] + source: io::Error, + }, /// A generic io error - #[snafu(display("io: {source}"))] - Write { source: io::Error }, + #[error("io: {source}")] + Write { + #[error(std_err)] + source: io::Error, + }, } impl DecodeError { pub(crate) fn leaf_hash_mismatch(num: ChunkNum) -> Self { - decode_error::LeafHashMismatchSnafu { num }.build() + e!(DecodeError::LeafHashMismatch { num }) } } impl From<AtBlobHeaderNextError> for DecodeError { fn from(cause: AtBlobHeaderNextError) -> Self { match cause { - AtBlobHeaderNextError::NotFound { .. } => decode_error::ChunkNotFoundSnafu.build(), - AtBlobHeaderNextError::Read { source, .. } => { - decode_error::ReadSnafu.into_error(source) - } + AtBlobHeaderNextError::NotFound { .. } => e!(DecodeError::ChunkNotFound), + AtBlobHeaderNextError::Read { source, .. 
} => e!(DecodeError::Read, source), } } } @@ -715,18 +700,18 @@ fn from(value: bao_tree::io::DecodeError) -> Self { match value { bao_tree::io::DecodeError::ParentNotFound(node) => { - decode_error::ParentNotFoundSnafu { node }.build() + e!(DecodeError::ParentNotFound { node }) } bao_tree::io::DecodeError::LeafNotFound(num) => { - decode_error::LeafNotFoundSnafu { num }.build() + e!(DecodeError::LeafNotFound { num }) } bao_tree::io::DecodeError::ParentHashMismatch(node) => { - decode_error::ParentHashMismatchSnafu { node }.build() + e!(DecodeError::ParentHashMismatch { node }) } bao_tree::io::DecodeError::LeafHashMismatch(num) => { - decode_error::LeafHashMismatchSnafu { num }.build() + e!(DecodeError::LeafHashMismatch { num }) } - bao_tree::io::DecodeError::Io(cause) => decode_error::ReadSnafu.into_error(cause), + bao_tree::io::DecodeError::Io(cause) => e!(DecodeError::Read, cause), } } } @@ -856,13 +841,13 @@ outboard .save(parent.node, &parent.pair) .await - .map_err(|e| decode_error::WriteSnafu.into_error(e))?; + .map_err(|e| e!(DecodeError::Write, e))?; } } BaoContentItem::Leaf(leaf) => { data.write_bytes_at(leaf.offset, leaf.data) .await - .map_err(|e| decode_error::WriteSnafu.into_error(e))?; + .map_err(|e| e!(DecodeError::Write, e))?; } } } @@ -888,7 +873,7 @@ BaoContentItem::Leaf(leaf) => { data.write_bytes_at(leaf.offset, leaf.data) .await - .map_err(|e| decode_error::WriteSnafu.into_error(e))?; + .map_err(|e| e!(DecodeError::Write, e))?; } } } @@ -973,18 +958,14 @@ } /// Error that you can get from [`AtClosing::next`] - #[common_fields({ - backtrace: Option<Backtrace>, - #[snafu(implicit)] - span_trace: SpanTrace, - })] - #[non_exhaustive] - #[derive(Debug, Snafu)] - #[snafu(module)] + #[stack_error(derive, add_meta, from_sources)] pub enum AtClosingNextError { /// Generic io error - #[snafu(transparent)] - Read { source: io::Error }, + #[error(transparent)] + Read { + #[error(std_err)] + source: io::Error, + }, } #[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq)] diff --git a/src/get/error.rs b/src/get/error.rs index 5cc44e35b..8fa7ed4a8 100644 --- a/src/get/error.rs +++ b/src/get/error.rs @@ -2,54 +2,49 @@ use std::io; use iroh::endpoint::{ConnectionError, ReadError, VarInt, WriteError}; -use n0_snafu::SpanTrace; -use nested_enum_utils::common_fields; -use snafu::{Backtrace, Snafu}; +use n0_error::{stack_error, AnyError}; use crate::get::fsm::{ AtBlobHeaderNextError, AtClosingNextError, ConnectedNextError, DecodeError, InitialNextError, }; /// Failures for a get operation -#[common_fields({ - backtrace: Option<Backtrace>, - #[snafu(implicit)] - span_trace: SpanTrace, -})] -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] -#[snafu(module)] +#[stack_error(derive, add_meta)] pub enum GetError { - #[snafu(transparent)] + #[error(transparent)] InitialNext { + #[error(from)] source: InitialNextError, }, - #[snafu(transparent)] + #[error(transparent)] ConnectedNext { + #[error(from)] source: ConnectedNextError, }, - #[snafu(transparent)] + #[error(transparent)] AtBlobHeaderNext { + #[error(from)] source: AtBlobHeaderNextError, }, - #[snafu(transparent)] + #[error(transparent)] Decode { + #[error(from)] source: DecodeError, }, - #[snafu(transparent)] + #[error(transparent)] IrpcSend { + #[error(from)] source: irpc::channel::SendError, }, - #[snafu(transparent)] + #[error(transparent)] AtClosingNext { + #[error(from)] source: AtClosingNextError, }, - LocalFailure { - source: anyhow::Error, - }, - BadRequest { - 
source: anyhow::Error, - }, + #[error("local failure")] + LocalFailure { source: AnyError }, + #[error("bad request")] + BadRequest { source: AnyError }, } impl GetError { diff --git a/src/get/request.rs b/src/get/request.rs index e55235cca..c4c15cdf5 100644 --- a/src/get/request.rs +++ b/src/get/request.rs @@ -17,15 +17,14 @@ use bao_tree::{io::BaoContentItem, ChunkNum, ChunkRanges}; use bytes::Bytes; use genawaiter::sync::{Co, Gen}; use iroh::endpoint::Connection; +use n0_error::e; use n0_future::{Stream, StreamExt}; use nested_enum_utils::enum_conversions; use rand::Rng; -use snafu::IntoError; use tokio::sync::mpsc; use super::{fsm, GetError, GetResult, Stats}; use crate::{ - get::get_error::{BadRequestSnafu, LocalFailureSnafu}, hashseq::HashSeq, protocol::{ChunkRangesExt, ChunkRangesSeq, GetRequest}, Hash, HashAndFormat, @@ -58,7 +57,10 @@ impl GetBlobResult { let mut parts = Vec::new(); let stats = loop { let Some(item) = self.next().await else { - return Err(LocalFailureSnafu.into_error(anyhow::anyhow!("unexpected end"))); + return Err(e!( + GetError::LocalFailure, + n0_error::anyerr!("unexpected end") + )); }; match item { GetBlobItem::Item(item) => { @@ -238,11 +240,14 @@ pub async fn get_hash_seq_and_sizes( let (at_blob_content, size) = at_start_root.next().await?; // check the size to avoid parsing a maliciously large hash seq if size > max_size { - return Err(BadRequestSnafu.into_error(anyhow::anyhow!("size too large"))); + return Err(e!( + GetError::BadRequest, + n0_error::anyerr!("size too large") + )); } let (mut curr, hash_seq) = at_blob_content.concatenate_into_vec().await?; let hash_seq = - HashSeq::try_from(Bytes::from(hash_seq)).map_err(|e| BadRequestSnafu.into_error(e))?; + HashSeq::try_from(Bytes::from(hash_seq)).map_err(|e| e!(GetError::BadRequest, e))?; let mut sizes = Vec::with_capacity(hash_seq.len()); let closing = loop { match curr.next() { diff --git a/src/hash.rs b/src/hash.rs index 22fe333d4..88fc2d2d9 100644 --- a/src/hash.rs +++ b/src/hash.rs @@ -4,11 +4,9 @@ use std::{borrow::Borrow, fmt, str::FromStr}; use arrayvec::ArrayString; use bao_tree::blake3; -use n0_snafu::SpanTrace; -use nested_enum_utils::common_fields; +use n0_error::{e, stack_error, StdResultExt}; use postcard::experimental::max_size::MaxSize; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -use snafu::{Backtrace, ResultExt, Snafu}; use crate::store::util::DD; @@ -137,19 +135,17 @@ impl fmt::Display for Hash { } } -#[common_fields({ - backtrace: Option<Backtrace>, - #[snafu(implicit)] - span_trace: SpanTrace, -})] #[allow(missing_docs)] #[non_exhaustive] -#[derive(Debug, Snafu)] +#[stack_error(derive, add_meta, std_sources)] pub enum HexOrBase32ParseError { - #[snafu(display("Invalid length"))] + #[error("Invalid length")] DecodeInvalidLength {}, - #[snafu(display("Failed to decode {source}"))] - Decode { source: data_encoding::DecodeError }, + #[error("Failed to decode {source}")] + Decode { + #[error(std_err)] + source: data_encoding::DecodeError, + }, } impl FromStr for Hash { @@ -167,10 +163,10 @@ match res { Ok(len) => { if len != 32 { - return Err(DecodeInvalidLengthSnafu.build()); + return Err(e!(HexOrBase32ParseError::DecodeInvalidLength)); } } - Err(partial) => return Err(partial.error).context(DecodeSnafu), + Err(partial) => return Err(e!(HexOrBase32ParseError::Decode, partial.error)), } Ok(Self(blake3::Hash::from_bytes(bytes))) } @@ -399,21 +395,23 @@ impl fmt::Display for HashAndFormat { } impl FromStr for HashAndFormat { - type Err = anyhow::Error; 
+ type Err = n0_error::AnyError; fn from_str(s: &str) -> Result<Self, Self::Err> { let s = s.as_bytes(); let mut hash = [0u8; 32]; match s.len() { 64 => { - hex::decode_to_slice(s, &mut hash)?; + hex::decode_to_slice(s, &mut hash).anyerr()?; Ok(Self::raw(hash.into())) } 65 if s[0].eq_ignore_ascii_case(&b's') => { - hex::decode_to_slice(&s[1..], &mut hash)?; + hex::decode_to_slice(&s[1..], &mut hash).anyerr()?; Ok(Self::hash_seq(hash.into())) } - _ => anyhow::bail!("invalid hash and format"), + _ => { + n0_error::bail_any!("invalid hash and format"); + } } } } diff --git a/src/hashseq.rs b/src/hashseq.rs index 98d96e458..84aa950cc 100644 --- a/src/hashseq.rs +++ b/src/hashseq.rs @@ -2,6 +2,7 @@ use std::fmt::Debug; use bytes::Bytes; +use n0_error::{anyerr, AnyError}; use crate::Hash; @@ -34,10 +35,10 @@ impl FromIterator<Hash> for HashSeq { } impl TryFrom<Bytes> for HashSeq { - type Error = anyhow::Error; + type Error = AnyError; fn try_from(bytes: Bytes) -> Result<Self, Self::Error> { - Self::new(bytes).ok_or_else(|| anyhow::anyhow!("invalid hash sequence")) + Self::new(bytes).ok_or_else(|| anyerr!("invalid hash sequence")) } } diff --git a/src/net_protocol.rs b/src/net_protocol.rs index 4eb112650..99464dd32 100644 --- a/src/net_protocol.rs +++ b/src/net_protocol.rs @@ -5,7 +5,7 @@ //! # Example //! //! ```rust -//! # async fn example() -> anyhow::Result<()> { +//! # async fn example() -> n0_error::Result<()> { //! use iroh::{protocol::Router, Endpoint}; //! use iroh_blobs::{store, ticket::BlobTicket, BlobsProtocol}; //! diff --git a/src/protocol.rs b/src/protocol.rs index db5faf060..c75526642 100644 --- a/src/protocol.rs +++ b/src/protocol.rs @@ -387,8 +387,8 @@ use range_collections::{range_set::RangeSetEntry, RangeSet2}; use serde::{Deserialize, Serialize}; mod range_spec; pub use bao_tree::ChunkRanges; +use n0_error::stack_error; pub use range_spec::{ChunkRangesSeq, NonEmptyRequestRangeSpecIter, RangeSpec}; -use snafu::{GenerateImplicitData, Snafu}; use crate::{api::blobs::Bitfield, util::RecvStreamExt, BlobFormat, Hash, HashAndFormat}; @@ -703,20 +703,10 @@ impl From<Closed> for VarInt { } /// Unknown error_code, can not be converted into [`Closed`]. -#[derive(Debug, Snafu)] -#[snafu(display("Unknown error_code: {code}"))] +#[stack_error(derive, add_meta)] +#[error("Unknown error_code: {code}")] pub struct UnknownErrorCode { code: u64, - backtrace: Option<Backtrace>, -} - -impl UnknownErrorCode { - pub(crate) fn new(code: u64) -> Self { - Self { - code, - backtrace: GenerateImplicitData::generate(), - } - } } impl TryFrom<VarInt> for Closed { @@ -727,7 +717,7 @@ 0 => Ok(Self::StreamDropped), 1 => Ok(Self::ProviderTerminating), 2 => Ok(Self::RequestReceived), - val => Err(UnknownErrorCode::new(val)), + val => Err(n0_error::e!(UnknownErrorCode { code: val })), } } } diff --git a/src/provider.rs b/src/provider.rs index fa4150619..8c7fb76f9 100644 --- a/src/provider.rs +++ b/src/provider.rs @@ -5,13 +5,12 @@ //! handler with an [`iroh::Endpoint`](iroh::protocol::Router). 
 use std::{fmt::Debug, future::Future, io, time::Duration};

-use anyhow::Result;
 use bao_tree::ChunkRanges;
 use iroh::endpoint::{self, ConnectionError, VarInt};
 use iroh_io::{AsyncStreamReader, AsyncStreamWriter};
+use n0_error::{e, stack_error, Result};
 use n0_future::{time::Instant, StreamExt};
 use serde::{Deserialize, Serialize};
-use snafu::Snafu;
 use tokio::select;
 use tracing::{debug, debug_span, Instrument};

@@ -370,7 +369,7 @@ async fn handle_read_result(
 pub async fn handle_stream(
     mut pair: StreamPair,
     store: Store,
-) -> anyhow::Result<()> {
+) -> n0_error::Result<()> {
     let request = pair.read_request().await?;
     match request {
         Request::Get(request) => handle_get(pair, store, request).await?,
@@ -382,15 +381,17 @@ pub async fn handle_stream(
     Ok(())
 }

-#[derive(Debug, Snafu)]
-#[snafu(module)]
+#[stack_error(derive, add_meta, from_sources)]
 pub enum HandleGetError {
-    #[snafu(transparent)]
+    #[error(transparent)]
     ExportBao {
+        #[error(std_err)]
         source: ExportBaoError,
     },
-    InvalidHashSeq,
-    InvalidOffset,
+    #[error("Invalid hash sequence")]
+    InvalidHashSeq {},
+    #[error("Invalid offset")]
+    InvalidOffset {},
 }

 impl HasErrorCode for HandleGetError {
@@ -398,9 +399,10 @@ impl HasErrorCode for HandleGetError {
         match self {
             HandleGetError::ExportBao {
                 source: ExportBaoError::ClientError { source, .. },
+                ..
             } => source.code(),
-            HandleGetError::InvalidHashSeq => ERR_INTERNAL,
-            HandleGetError::InvalidOffset => ERR_INTERNAL,
+            HandleGetError::InvalidHashSeq { .. } => ERR_INTERNAL,
+            HandleGetError::InvalidOffset { .. } => ERR_INTERNAL,
             _ => ERR_INTERNAL,
         }
     }
@@ -431,12 +433,12 @@ async fn handle_get_impl(
             None => {
                 let bytes = store.get_bytes(hash).await?;
                 let hs =
-                    HashSeq::try_from(bytes).map_err(|_| HandleGetError::InvalidHashSeq)?;
+                    HashSeq::try_from(bytes).map_err(|_| e!(HandleGetError::InvalidHashSeq))?;
                 hash_seq = Some(hs);
                 hash_seq.as_ref().unwrap()
             }
         };
-        let o = usize::try_from(offset - 1).map_err(|_| HandleGetError::InvalidOffset)?;
+        let o = usize::try_from(offset - 1).map_err(|_| e!(HandleGetError::InvalidOffset))?;
         let Some(hash) = hash_seq.get(o) else {
             break;
         };
@@ -447,7 +449,7 @@ async fn handle_get_impl(
         .inner
         .sync()
         .await
-        .map_err(|e| HandleGetError::ExportBao { source: e.into() })?;
+        .map_err(|e| e!(HandleGetError::ExportBao, e.into()))?;
     Ok(())
 }

@@ -456,7 +458,7 @@ pub async fn handle_get(
     mut pair: StreamPair,
     store: Store,
     request: GetRequest,
-) -> anyhow::Result<()> {
+) -> n0_error::Result<()> {
     let res = pair.get_request(|| request.clone()).await;
     let tracker = handle_read_request_result(&mut pair, res).await?;
     let mut writer = pair.into_writer(tracker).await?;
@@ -465,9 +467,9 @@ pub async fn handle_get(
     Ok(())
 }

-#[derive(Debug, Snafu)]
+#[stack_error(derive, add_meta, from_sources)]
 pub enum HandleGetManyError {
-    #[snafu(transparent)]
+    #[error(transparent)]
     ExportBao { source: ExportBaoError },
 }

@@ -476,6 +478,7 @@ impl HasErrorCode for HandleGetManyError {
     fn code(&self) -> VarInt {
         match self {
             Self::ExportBao {
                 source: ExportBaoError::ClientError { source, .. },
+                ..
             } => source.code(),
             _ => ERR_INTERNAL,
         }
@@ -504,7 +507,7 @@ pub async fn handle_get_many(
     mut pair: StreamPair,
     store: Store,
     request: GetManyRequest,
-) -> anyhow::Result<()> {
+) -> n0_error::Result<()> {
     let res = pair.get_many_request(|| request.clone()).await;
     let tracker = handle_read_request_result(&mut pair, res).await?;
     let mut writer = pair.into_writer(tracker).await?;
@@ -513,19 +516,16 @@ pub async fn handle_get_many(
     Ok(())
 }

-#[derive(Debug, Snafu)]
+#[stack_error(derive, add_meta, from_sources)]
 pub enum HandlePushError {
-    #[snafu(transparent)]
-    ExportBao {
-        source: ExportBaoError,
-    },
+    #[error(transparent)]
+    ExportBao { source: ExportBaoError },

-    InvalidHashSeq,
+    #[error("Invalid hash sequence")]
+    InvalidHashSeq {},

-    #[snafu(transparent)]
-    Request {
-        source: RequestError,
-    },
+    #[error(transparent)]
+    Request { source: RequestError },
 }

 impl HasErrorCode for HandlePushError {
@@ -533,6 +533,7 @@ impl HasErrorCode for HandlePushError {
         match self {
             Self::ExportBao {
                 source: ExportBaoError::ClientError { source, .. },
+                ..
             } => source.code(),
             _ => ERR_INTERNAL,
         }
@@ -563,7 +564,7 @@ async fn handle_push_impl(
     }
     // todo: we assume here that the hash sequence is complete. For some requests this might not be the case. We would need `LazyHashSeq` for that, but it is buggy as of now!
     let hash_seq = store.get_bytes(hash).await?;
-    let hash_seq = HashSeq::try_from(hash_seq).map_err(|_| HandlePushError::InvalidHashSeq)?;
+    let hash_seq = HashSeq::try_from(hash_seq).map_err(|_| e!(HandlePushError::InvalidHashSeq))?;
     for (child_hash, child_ranges) in hash_seq.into_iter().zip(request_ranges) {
         if child_ranges.is_empty() {
             continue;
@@ -579,7 +580,7 @@ pub async fn handle_push(
     mut pair: StreamPair,
     store: Store,
     request: PushRequest,
-) -> anyhow::Result<()> {
+) -> n0_error::Result<()> {
     let res = pair.push_request(|| request.clone()).await;
     let tracker = handle_read_request_result(&mut pair, res).await?;
     let mut reader = pair.into_reader(tracker).await?;
@@ -602,14 +603,13 @@ pub(crate) async fn send_blob(
         .await
 }

-#[derive(Debug, Snafu)]
+#[stack_error(derive, add_meta, std_sources, from_sources)]
 pub enum HandleObserveError {
-    ObserveStreamClosed,
+    #[error("observe stream closed")]
+    ObserveStreamClosed {},

-    #[snafu(transparent)]
-    RemoteClosed {
-        source: io::Error,
-    },
+    #[error(transparent)]
+    RemoteClosed { source: io::Error },
 }

 impl HasErrorCode for HandleObserveError {
@@ -630,18 +630,18 @@ async fn handle_observe_impl(
         .observe(request.hash)
         .stream()
         .await
-        .map_err(|_| HandleObserveError::ObserveStreamClosed)?;
+        .map_err(|_| e!(HandleObserveError::ObserveStreamClosed))?;
     let mut old = stream
         .next()
         .await
-        .ok_or(HandleObserveError::ObserveStreamClosed)?;
+        .ok_or_else(|| e!(HandleObserveError::ObserveStreamClosed))?;
     // send the initial bitfield
     send_observe_item(writer, &old).await?;
     // send updates until the remote loses interest
     loop {
         select! {
             new = stream.next() => {
-                let new = new.ok_or(HandleObserveError::ObserveStreamClosed)?;
+                let new = new.ok_or_else(|| e!(HandleObserveError::ObserveStreamClosed))?;
                 let diff = old.diff(&new);
                 if diff.is_empty() {
                     continue;
@@ -672,7 +672,7 @@ pub async fn handle_observe(
     mut pair: StreamPair,
     store: Store,
     request: ObserveRequest,
-) -> anyhow::Result<()> {
+) -> n0_error::Result<()> {
     let res = pair.observe_request(|| request.clone()).await;
     let tracker = handle_read_request_result(&mut pair, res).await?;
     let mut writer = pair.into_writer(tracker).await?;
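
Note: the pattern above repeats throughout this PR, so it is worth spelling out once. `snafu` enums become `stack_error` enums, `add_meta` turns every variant into a struct variant (hence the `{}` bodies and the `{ .. }` patterns at match sites), and values are built with the `e!` macro. A minimal sketch of the pattern using only attributes that appear in this diff; `DemoError` and `demo` are illustrative names, not part of the codebase:

    use n0_error::{e, stack_error};

    // `add_meta` injects hidden metadata fields into each variant, which is
    // why unit-like variants are written with `{}` and matched with `{ .. }`.
    // `from_sources` derives `From<Source>` for variants carrying a `source`.
    #[stack_error(derive, add_meta, from_sources)]
    pub enum DemoError {
        #[error("invalid input")]
        InvalidInput {},
        #[error(transparent)]
        Io {
            #[error(std_err)]
            source: std::io::Error,
        },
    }

    fn demo(data: &[u8]) -> Result<(), DemoError> {
        if data.is_empty() {
            // `e!` constructs the variant and fills in the metadata.
            return Err(e!(DemoError::InvalidInput));
        }
        Ok(())
    }
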
diff --git a/src/provider/events.rs b/src/provider/events.rs
index 7f27b2dd2..4c7c22461 100644
--- a/src/provider/events.rs
+++ b/src/provider/events.rs
@@ -5,8 +5,8 @@ use irpc::{
     channel::{mpsc, none::NoSender, oneshot},
     rpc_requests, Channels, WithChannels,
 };
+use n0_error::{e, stack_error};
 use serde::{Deserialize, Serialize};
-use snafu::Snafu;

 use crate::{
     protocol::{
@@ -86,22 +86,22 @@ pub enum AbortReason {
 }

 /// Errors that can occur when sending progress updates.
-#[derive(Debug, Snafu)]
+#[stack_error(derive, add_meta, from_sources)]
 pub enum ProgressError {
-    Limit,
-    Permission,
-    #[snafu(transparent)]
-    Internal {
-        source: irpc::Error,
-    },
+    #[error("limit")]
+    Limit {},
+    #[error("permission")]
+    Permission {},
+    #[error(transparent)]
+    Internal { source: irpc::Error },
 }

 impl From<ProgressError> for io::Error {
     fn from(value: ProgressError) -> Self {
         match value {
-            ProgressError::Limit => io::ErrorKind::QuotaExceeded.into(),
-            ProgressError::Permission => io::ErrorKind::PermissionDenied.into(),
-            ProgressError::Internal { source } => source.into(),
+            ProgressError::Limit { .. } => io::ErrorKind::QuotaExceeded.into(),
+            ProgressError::Permission { .. } => io::ErrorKind::PermissionDenied.into(),
+            ProgressError::Internal { source, .. } => source.into(),
         }
     }
 }

@@ -113,8 +113,8 @@ pub trait HasErrorCode {
 impl HasErrorCode for ProgressError {
     fn code(&self) -> VarInt {
         match self {
-            ProgressError::Limit => ERR_LIMIT,
-            ProgressError::Permission => ERR_PERMISSION,
+            ProgressError::Limit { .. } => ERR_LIMIT,
+            ProgressError::Permission { .. } => ERR_PERMISSION,
             ProgressError::Internal { .. } => ERR_INTERNAL,
         }
     }
@@ -123,8 +123,8 @@ impl HasErrorCode for ProgressError {
 impl ProgressError {
     pub fn reason(&self) -> &'static [u8] {
         match self {
-            ProgressError::Limit => b"limit",
-            ProgressError::Permission => b"permission",
+            ProgressError::Limit { .. } => b"limit",
+            ProgressError::Permission { .. } => b"permission",
             ProgressError::Internal { .. } => b"internal",
         }
     }
@@ -133,33 +133,27 @@ impl ProgressError {
 impl From<AbortReason> for ProgressError {
     fn from(value: AbortReason) -> Self {
         match value {
-            AbortReason::RateLimited => ProgressError::Limit,
-            AbortReason::Permission => ProgressError::Permission,
+            AbortReason::RateLimited => n0_error::e!(ProgressError::Limit),
+            AbortReason::Permission => n0_error::e!(ProgressError::Permission),
         }
     }
 }

 impl From<irpc::channel::mpsc::RecvError> for ProgressError {
     fn from(value: irpc::channel::mpsc::RecvError) -> Self {
-        ProgressError::Internal {
-            source: value.into(),
-        }
+        n0_error::e!(ProgressError::Internal, value.into())
     }
 }

 impl From<irpc::channel::oneshot::RecvError> for ProgressError {
     fn from(value: irpc::channel::oneshot::RecvError) -> Self {
-        ProgressError::Internal {
-            source: value.into(),
-        }
+        n0_error::e!(ProgressError::Internal, value.into())
     }
 }

 impl From<irpc::channel::SendError> for ProgressError {
     fn from(value: irpc::channel::SendError) -> Self {
-        ProgressError::Internal {
-            source: value.into(),
-        }
+        n0_error::e!(ProgressError::Internal, value.into())
     }
 }

@@ -508,7 +502,7 @@ impl EventSender {
                 RequestUpdates::Active(tx)
             }
             RequestMode::Disabled => {
-                return Err(ProgressError::Permission);
+                return Err(e!(ProgressError::Permission));
             }
             _ => RequestUpdates::None,
         },
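
One consequence for downstream users: because the variants are now struct variants, every match arm needs `{ .. }`, as the `code()` and `reason()` changes above show. A hypothetical caller, for illustration only:

    fn is_retryable(err: &ProgressError) -> bool {
        match err {
            // rate limits are worth retrying, permission errors are not
            ProgressError::Limit { .. } => true,
            ProgressError::Permission { .. } => false,
            ProgressError::Internal { .. } => false,
        }
    }
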
diff --git a/src/store/fs.rs b/src/store/fs.rs
index 53c697abc..ffc9a07ee 100644
--- a/src/store/fs.rs
+++ b/src/store/fs.rs
@@ -94,6 +94,7 @@ use entry_state::{DataLocation, OutboardLocation};
 use import::{ImportEntry, ImportSource};
 use irpc::{channel::mpsc, RpcMessage};
 use meta::list_blobs;
+use n0_error::{Result, StdResultExt};
 use n0_future::{future::yield_now, io};
 use nested_enum_utils::enum_conversions;
 use range_collections::range_set::RangeSetRange;
@@ -638,7 +639,7 @@ impl Actor {
         fs_commands_rx: tokio::sync::mpsc::Receiver<InternalCommand>,
         fs_commands_tx: tokio::sync::mpsc::Sender<InternalCommand>,
         options: Arc<Options>,
-    ) -> anyhow::Result<Self> {
+    ) -> Result<Self> {
         trace!(
             "creating data directory: {}",
             options.path.data_path.display()
@@ -734,7 +735,7 @@ impl HashSpecificCommand for ExportPathMsg {
             _ => unreachable!(),
         };
         self.tx
-            .send(ExportProgressItem::Error(api::Error::Io(err)))
+            .send(ExportProgressItem::Error(api::Error::from(err)))
             .await
             .ok();
     }
@@ -766,7 +767,7 @@ impl HashSpecificCommand for ExportRangesMsg {
             _ => unreachable!(),
         };
         self.tx
-            .send(ExportRangesItem::Error(api::Error::Io(err)))
+            .send(ExportRangesItem::Error(api::Error::from(err)))
             .await
             .ok();
     }
@@ -781,7 +782,7 @@ impl HashSpecificCommand for ImportBaoMsg {
             SpawnArg::Dead => io::Error::other("entity is dead"),
             _ => unreachable!(),
         };
-        self.tx.send(Err(api::Error::Io(err))).await.ok();
+        self.tx.send(Err(api::Error::from(err))).await.ok();
     }
 }
 impl HashSpecific for (TempTag, ImportEntryMsg) {
@@ -1388,7 +1389,7 @@ async fn copy_with_progress(
 impl FsStore {
     /// Load or create a new store.
-    pub async fn load(root: impl AsRef<Path>) -> anyhow::Result<Self> {
+    pub async fn load(root: impl AsRef<Path>) -> Result<Self> {
         let path = root.as_ref();
         let db_path = path.join("blobs.db");
         let options = Options::new(path);
@@ -1396,7 +1397,7 @@ impl FsStore {
     }

     /// Load or create a new store with custom options, returning an additional sender for file store specific commands.
-    pub async fn load_with_opts(db_path: PathBuf, options: Options) -> anyhow::Result<Self> {
+    pub async fn load_with_opts(db_path: PathBuf, options: Options) -> Result<Self> {
         static THREAD_NR: AtomicU64 = AtomicU64::new(0);
         let rt = tokio::runtime::Builder::new_multi_thread()
             .thread_name_fn(|| {
@@ -1420,7 +1421,8 @@ impl FsStore {
                 fs_commands_tx.clone(),
                 Arc::new(options),
             ))
-            .await??;
+            .await
+            .anyerr()??;
         handle.spawn(actor.run());
         let store = FsStore::new(commands_tx.into(), fs_commands_tx);
         if let Some(config) = gc_config {
@@ -1476,7 +1478,7 @@ impl FsStore {
         }
     }

-    pub async fn dump(&self) -> anyhow::Result<()> {
+    pub async fn dump(&self) -> Result<()> {
         let (tx, rx) = oneshot::channel();
         self.db
             .send(
@@ -1486,8 +1488,9 @@ impl FsStore {
                 }
                 .into(),
             )
-            .await?;
-        rx.await??;
+            .await
+            .anyerr()?;
+        rx.await.anyerr()??;
         Ok(())
     }
 }
@@ -1541,7 +1544,7 @@ pub mod tests {
     fn create_n0_bao_full(
         data: &[u8],
         ranges: &ChunkRanges,
-    ) -> anyhow::Result<(Hash, ChunkRanges, Vec<u8>)> {
+    ) -> n0_error::Result<(Hash, ChunkRanges, Vec<u8>)> {
         let ranges = round_up_request(data.len() as u64, ranges);
         let (hash, encoded) = create_n0_bao(data, &ranges)?;
         Ok((hash, ranges, encoded))
diff --git a/src/store/fs/bao_file.rs b/src/store/fs/bao_file.rs
index 0502cead6..72ab48012 100644
--- a/src/store/fs/bao_file.rs
+++ b/src/store/fs/bao_file.rs
@@ -19,6 +19,7 @@ use bao_tree::{
 use bytes::{Bytes, BytesMut};
 use derive_more::Debug;
 use irpc::channel::mpsc;
+use n0_error::{Result, StdResultExt};
 use tokio::sync::watch;
 use tracing::{debug, info, trace};

@@ -706,7 +707,7 @@ impl BaoFileStorageSubscriber {
     /// Forward observed *values* to the given sender
     ///
     /// Returns an error if sending fails, or if the last sender is dropped
-    pub async fn forward(mut self, mut tx: mpsc::Sender<Bitfield>) -> anyhow::Result<()> {
+    pub async fn forward(mut self, mut tx: mpsc::Sender<Bitfield>) -> Result<()> {
         let value = self.receiver.borrow().bitfield();
         tx.send(value).await?;
         loop {
@@ -720,7 +721,7 @@ impl BaoFileStorageSubscriber {
     ///
     /// Returns an error if sending fails, or if the last sender is dropped
     #[allow(dead_code)]
-    pub async fn forward_delta(mut self, mut tx: mpsc::Sender<Bitfield>) -> anyhow::Result<()> {
+    pub async fn forward_delta(mut self, mut tx: mpsc::Sender<Bitfield>) -> Result<()> {
         let value = self.receiver.borrow().bitfield();
         let mut old = value.clone();
         tx.send(value).await?;
@@ -736,13 +737,13 @@ impl BaoFileStorageSubscriber {
         }
     }

-    async fn update_or_closed(&mut self, tx: &mut mpsc::Sender<Bitfield>) -> anyhow::Result<()> {
+    async fn update_or_closed(&mut self, tx: &mut mpsc::Sender<Bitfield>) -> Result<()> {
         tokio::select! {
             _ = tx.closed() => {
                 // the sender is closed, we are done
                 Err(n0_error::e!(irpc::channel::SendError::ReceiverClosed).into())
             }
-            e = self.receiver.changed() => Ok(e?),
+            e = self.receiver.changed() => Ok(e.anyerr()?),
         }
     }
 }
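
The `.anyerr()` calls above come from `n0_error::StdResultExt`, which converts a result carrying any std error (here tokio's join and watch errors) into an `n0_error::Result`. A minimal sketch of the same conversion, with a hypothetical fallible call:

    use n0_error::{Result, StdResultExt};

    fn read_config(path: &std::path::Path) -> Result<String> {
        // `anyerr()` wraps the `std::io::Error` into an `AnyError`, the
        // default error type of `n0_error::Result`.
        let text = std::fs::read_to_string(path).anyerr()?;
        Ok(text)
    }
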
diff --git a/src/store/fs/meta.rs b/src/store/fs/meta.rs
index b03304ad1..6d17fc13d 100644
--- a/src/store/fs/meta.rs
+++ b/src/store/fs/meta.rs
@@ -11,10 +11,8 @@ use std::{
 use bao_tree::BaoTree;
 use bytes::Bytes;
 use irpc::channel::mpsc;
-use n0_snafu::SpanTrace;
-use nested_enum_utils::common_fields;
+use n0_error::{anyerr, e, stack_error, AnyError};
 use redb::{Database, DatabaseError, ReadableTable};
-use snafu::{Backtrace, ResultExt, Snafu};
 use tokio::pin;

 use crate::{
@@ -51,27 +49,39 @@ use crate::store::IROH_BLOCK_SIZE;
 ///
 /// What can go wrong are various things with redb, as well as io errors related
 /// to files other than redb.
-#[common_fields({
-    backtrace: Option<Backtrace>,
-    #[snafu(implicit)]
-    span_trace: SpanTrace,
-})]
 #[allow(missing_docs)]
 #[non_exhaustive]
-#[derive(Debug, Snafu)]
+#[stack_error(derive, add_meta, from_sources)]
 pub enum ActorError {
-    #[snafu(display("table error: {source}"))]
-    Table { source: redb::TableError },
-    #[snafu(display("database error: {source}"))]
-    Database { source: redb::DatabaseError },
-    #[snafu(display("transaction error: {source}"))]
-    Transaction { source: redb::TransactionError },
-    #[snafu(display("commit error: {source}"))]
-    Commit { source: redb::CommitError },
-    #[snafu(display("storage error: {source}"))]
-    Storage { source: redb::StorageError },
-    #[snafu(display("inconsistent database state: {msg}"))]
+    #[error("table error: {source}")]
+    Table {
+        #[error(std_err)]
+        source: redb::TableError,
+    },
+    #[error("database error: {source}")]
+    Database {
+        #[error(std_err)]
+        source: redb::DatabaseError,
+    },
+    #[error("transaction error: {source}")]
+    Transaction {
+        #[error(std_err)]
+        source: redb::TransactionError,
+    },
+    #[error("commit error: {source}")]
+    Commit {
+        #[error(std_err)]
+        source: redb::CommitError,
+    },
+    #[error("storage error: {source}")]
+    Storage {
+        #[error(std_err)]
+        source: redb::StorageError,
+    },
+    #[error("inconsistent database state: {msg}")]
     Inconsistent { msg: String },
+    #[error(transparent)]
+    Other { source: AnyError },
 }

 impl From<ActorError> for io::Error {
@@ -82,11 +92,11 @@ impl ActorError {
     pub(super) fn inconsistent(msg: String) -> Self {
-        InconsistentSnafu { msg }.build()
+        e!(ActorError::Inconsistent { msg })
     }
 }

-pub type ActorResult<T> = Result<T, ActorError>;
+pub type ActorResult<T> = std::result::Result<T, ActorError>;

 #[derive(Debug, Clone)]
 pub struct Db {
@@ -195,7 +205,7 @@ impl Db {
 fn handle_get(cmd: Get, tables: &impl ReadableTables) -> ActorResult<()> {
     trace!("{cmd:?}");
     let Get { hash, tx, .. } = cmd;
-    let Some(entry) = tables.blobs().get(hash).context(StorageSnafu)? else {
+    let Some(entry) = tables.blobs().get(hash)? else {
         tx.send(GetResult { state: Ok(None) });
         return Ok(());
     };
@@ -223,26 +233,42 @@ fn handle_get(cmd: Get, tables: &impl ReadableTables) -> ActorResult<()> {
 fn handle_dump(cmd: Dump, tables: &impl ReadableTables) -> ActorResult<()> {
     trace!("{cmd:?}");
     trace!("dumping database");
-    for e in tables.blobs().iter().context(StorageSnafu)? {
-        let (k, v) = e.context(StorageSnafu)?;
+    for e in tables
+        .blobs()
+        .iter()
+        .map_err(|e| e!(ActorError::Storage, e))?
+    {
+        let (k, v) = e.map_err(|e| e!(ActorError::Storage, e))?;
         let k = k.value();
         let v = v.value();
         println!("blobs: {} -> {:?}", k.to_hex(), v);
     }
-    for e in tables.tags().iter().context(StorageSnafu)? {
-        let (k, v) = e.context(StorageSnafu)?;
+    for e in tables
+        .tags()
+        .iter()
+        .map_err(|e| e!(ActorError::Storage, e))?
+    {
+        let (k, v) = e.map_err(|e| e!(ActorError::Storage, e))?;
         let k = k.value();
         let v = v.value();
         println!("tags: {k} -> {v:?}");
     }
-    for e in tables.inline_data().iter().context(StorageSnafu)? {
-        let (k, v) = e.context(StorageSnafu)?;
+    for e in tables
+        .inline_data()
+        .iter()
+        .map_err(|e| e!(ActorError::Storage, e))?
+    {
+        let (k, v) = e.map_err(|e| e!(ActorError::Storage, e))?;
         let k = k.value();
         let v = v.value();
         println!("inline_data: {} -> {:?}", k.to_hex(), v.len());
     }
-    for e in tables.inline_outboard().iter().context(StorageSnafu)? {
-        let (k, v) = e.context(StorageSnafu)?;
+    for e in tables
+        .inline_outboard()
+        .iter()
+        .map_err(|e| e!(ActorError::Storage, e))?
+    {
+        let (k, v) = e.map_err(|e| e!(ActorError::Storage, e))?;
         let k = k.value();
         let v = v.value();
         println!("inline_outboard: {} -> {:?}", k.to_hex(), v.len());
@@ -271,11 +297,19 @@ async fn handle_get_blob_status(
         tx,
         ..
     } = msg;
-    let res = match tables.blobs().get(hash).context(StorageSnafu)? {
+    let res = match tables
+        .blobs()
+        .get(hash)
+        .map_err(|e| e!(ActorError::Storage, e))?
+    {
         Some(entry) => match entry.value() {
             EntryState::Complete { data_location, .. } => match data_location {
                 DataLocation::Inline(_) => {
-                    let Some(data) = tables.inline_data().get(hash).context(StorageSnafu)? else {
+                    let Some(data) = tables
+                        .inline_data()
+                        .get(hash)
+                        .map_err(|e| e!(ActorError::Storage, e))?
+                    else {
                         return Err(ActorError::inconsistent(format!(
                             "inconsistent database state: {} not found",
                             hash.to_hex()
@@ -312,7 +346,11 @@ async fn handle_list_tags(msg: ListTagsMsg, tables: &impl ReadableTables) -> ActorResult<()> {
     let from = from.map(Bound::Included).unwrap_or(Bound::Unbounded);
     let to = to.map(Bound::Excluded).unwrap_or(Bound::Unbounded);
     let mut res = Vec::new();
-    for item in tables.tags().range((from, to)).context(StorageSnafu)? {
+    for item in tables
+        .tags()
+        .range((from, to))
+        .map_err(|e| e!(ActorError::Storage, e))?
+    {
         match item {
             Ok((k, v)) => {
                 let v = v.value();
@@ -325,9 +363,7 @@ async fn handle_list_tags(msg: ListTagsMsg, tables: &impl ReadableTables) -> ActorResult<()> {
                 res.push(crate::api::Result::Ok(info));
                 }
             }
-            Err(e) => {
-                res.push(Err(crate::api::Error::other(e)));
-            }
+            Err(e) => res.push(Err(crate::api::Error::other(e))),
         }
     }
     tx.send(res).await.ok();
@@ -345,11 +381,7 @@ fn handle_update(
     } = cmd;
     protected.insert(hash);
     trace!("updating hash {} to {}", hash.to_hex(), state.fmt_short());
-    let old_entry_opt = tables
-        .blobs
-        .get(hash)
-        .context(StorageSnafu)?
-        .map(|e| e.value());
+    let old_entry_opt = tables.blobs.get(hash)?.map(|e| e.value());
     let (state, data, outboard): (_, Option<Bytes>, Option<Bytes>) = match state {
         EntryState::Complete {
             data_location,
@@ -381,18 +413,21 @@ fn handle_update(
         }
         None => state,
     };
-    tables.blobs.insert(hash, state).context(StorageSnafu)?;
+    tables
+        .blobs
+        .insert(hash, state)
+        .map_err(|e| e!(ActorError::Storage, e))?;
     if let Some(data) = data {
         tables
             .inline_data
             .insert(hash, data.as_ref())
-            .context(StorageSnafu)?;
+            .map_err(|e| e!(ActorError::Storage, e))?;
     }
     if let Some(outboard) = outboard {
         tables
             .inline_outboard
             .insert(hash, outboard.as_ref())
-            .context(StorageSnafu)?;
+            .map_err(|e| e!(ActorError::Storage, e))?;
     }
     if let Some(tx) = tx {
         tx.send(Ok(()));
@@ -424,18 +459,21 @@ fn handle_set(cmd: Set, protected: &mut HashSet<Hash>, tables: &mut Tables) -> ActorResult<()> {
         }
         EntryState::Partial { size } => (EntryState::Partial { size }, None, None),
     };
-    tables.blobs.insert(hash, state).context(StorageSnafu)?;
+    tables
+        .blobs
+        .insert(hash, state)
+        .map_err(|e| e!(ActorError::Storage, e))?;
     if let Some(data) = data {
         tables
             .inline_data
             .insert(hash, data.as_ref())
-            .context(StorageSnafu)?;
+            .map_err(|e| e!(ActorError::Storage, e))?;
     }
     if let Some(outboard) = outboard {
         tables
             .inline_outboard
             .insert(hash, outboard.as_ref())
-            .context(StorageSnafu)?;
+            .map_err(|e| e!(ActorError::Storage, e))?;
     }
     tx.send(Ok(()));
     Ok(())
@@ -473,12 +511,12 @@ impl Actor {
         cmds: tokio::sync::mpsc::Receiver<Command>,
         mut ds: DeleteHandle,
         options: BatchOptions,
-    ) -> anyhow::Result<Self> {
+    ) -> Result<Self> {
         debug!("creating or opening meta database at {}", db_path.display());
         let mut db = match redb::Database::create(db_path) {
             Ok(db) => db,
             Err(DatabaseError::UpgradeRequired(1)) => {
-                return Err(anyhow::anyhow!("migration from v1 no longer supported"));
+                return Err(anyerr!("migration from v1 no longer supported").into());
             }
             Err(err) => return Err(err.into()),
         };
@@ -537,7 +575,7 @@ impl Actor {
                     trace!("delete {hash}: skip (protected)");
                     continue;
                 }
-                if let Some(entry) = tables.blobs.remove(hash).context(StorageSnafu)? {
+                if let Some(entry) = tables.blobs.remove(hash)? {
                     match entry.value() {
                         EntryState::Complete {
                             data_location,
@@ -546,7 +584,7 @@ impl Actor {
                             trace!("delete {hash}: currently complete. will be deleted.");
                             match data_location {
                                 DataLocation::Inline(_) => {
-                                    tables.inline_data.remove(hash).context(StorageSnafu)?;
+                                    tables.inline_data.remove(hash)?;
                                 }
                                 DataLocation::Owned(_) => {
                                     // mark the data for deletion
@@ -556,7 +594,7 @@ impl Actor {
                             }
                             match outboard_location {
                                 OutboardLocation::Inline(_) => {
-                                    tables.inline_outboard.remove(hash).context(StorageSnafu)?;
+                                    tables.inline_outboard.remove(hash)?;
                                 }
                                 OutboardLocation::Owned => {
                                     // mark the outboard for deletion
@@ -607,10 +645,7 @@ impl Actor {
                 let tag = Tag::auto(SystemTime::now(), |x| {
                     matches!(tables.tags.get(Tag(Bytes::copy_from_slice(x))), Ok(Some(_)))
                 });
-                tables
-                    .tags
-                    .insert(tag.clone(), value)
-                    .context(StorageSnafu)?;
+                tables.tags.insert(tag.clone(), value)?;
                 tag
             };
             tx.send(Ok(tag.clone())).await.ok();
@@ -626,14 +661,11 @@ impl Actor {
         } = cmd;
         let from = from.map(Bound::Included).unwrap_or(Bound::Unbounded);
         let to = to.map(Bound::Excluded).unwrap_or(Bound::Unbounded);
-        let removing = tables
-            .tags
-            .extract_from_if((from, to), |_, _| true)
-            .context(StorageSnafu)?;
+        let removing = tables.tags.extract_from_if((from, to), |_, _| true)?;
         // drain the iterator to actually remove the tags
         let mut deleted = 0;
         for res in removing {
-            res.context(StorageSnafu)?;
+            res?;
             deleted += 1;
         }
         tx.send(Ok(deleted)).await.ok();
@@ -647,7 +679,7 @@ impl Actor {
             tx,
             ..
         } = cmd;
-        let value = match tables.tags.remove(from).context(StorageSnafu)? {
+        let value = match tables.tags.remove(from)? {
             Some(value) => value.value(),
             None => {
                 tx.send(Err(api::Error::io(
@@ -659,7 +691,7 @@ impl Actor {
                 return Ok(());
             }
         };
-        tables.tags.insert(to, value).context(StorageSnafu)?;
+        tables.tags.insert(to, value)?;
         tx.send(Ok(())).await.ok();
         Ok(())
     }
@@ -737,8 +769,10 @@ impl Actor {
             }
             TopLevelCommand::Snapshot(cmd) => {
                 trace!("{cmd:?}");
-                let txn = db.begin_read().context(TransactionSnafu)?;
-                let snapshot = ReadOnlyTables::new(&txn).context(TableSnafu)?;
+                let txn = db
+                    .begin_read()
+                    .map_err(|e| e!(ActorError::Transaction, e))?;
+                let snapshot = ReadOnlyTables::new(&txn).map_err(|e| e!(ActorError::Table, e))?;
                 cmd.tx.send(snapshot).ok();
                 None
             }
@@ -764,8 +798,8 @@ impl Actor {
             Command::ReadOnly(cmd) => {
                 let op = TxnNum::Read(op);
                 self.cmds.push_back(cmd.into()).ok();
-                let tx = db.begin_read().context(TransactionSnafu)?;
-                let tables = ReadOnlyTables::new(&tx).context(TableSnafu)?;
+                let tx = db.begin_read().map_err(|e| e!(ActorError::Transaction, e))?;
+                let tables = ReadOnlyTables::new(&tx).map_err(|e| e!(ActorError::Table, e))?;
                 let timeout = n0_future::time::sleep(self.options.max_read_duration);
                 pin!(timeout);
                 let mut n = 0;
@@ -782,8 +818,11 @@ impl Actor {
                 let op = TxnNum::Write(op);
                 self.cmds.push_back(cmd.into()).ok();
                 let ftx = self.ds.begin_write();
-                let tx = db.begin_write().context(TransactionSnafu)?;
-                let mut tables = Tables::new(&tx, &ftx).context(TableSnafu)?;
+                let tx = db
+                    .begin_write()
+                    .map_err(|e| e!(ActorError::Transaction, e))?;
+                let mut tables =
+                    Tables::new(&tx, &ftx).map_err(|e| e!(ActorError::Table, e))?;
                 let timeout = n0_future::time::sleep(self.options.max_read_duration);
                 pin!(timeout);
                 let mut n = 0;
@@ -800,7 +839,7 @@ impl Actor {
                 }
             }
             drop(tables);
-            tx.commit().context(CommitSnafu)?;
+            tx.commit().map_err(|e| e!(ActorError::Commit, e))?;
             ftx.commit();
         }
     }
@@ -853,7 +892,11 @@ fn load_data(
 ) -> ActorResult> {
     Ok(match location {
         DataLocation::Inline(()) => {
-            let Some(data) = tables.inline_data().get(hash).context(StorageSnafu)? else {
+            let Some(data) = tables
+                .inline_data()
+                .get(hash)
+                .map_err(|e| e!(ActorError::Storage, e))?
+            else {
                 return Err(ActorError::inconsistent(format!(
                     "inconsistent database state: {} should have inline data but does not",
                     hash.to_hex()
@@ -874,7 +917,11 @@ fn load_outboard(
     Ok(match location {
         OutboardLocation::NotNeeded => OutboardLocation::NotNeeded,
         OutboardLocation::Inline(_) => {
-            let Some(outboard) = tables.inline_outboard().get(hash).context(StorageSnafu)? else {
+            let Some(outboard) = tables
+                .inline_outboard()
+                .get(hash)
+                .map_err(|e| e!(ActorError::Storage, e))?
+            else {
                 return Err(ActorError::inconsistent(format!(
                     "inconsistent database state: {} should have inline outboard but does not",
                     hash.to_hex()
diff --git a/src/store/fs/meta/proto.rs b/src/store/fs/meta/proto.rs
index 6f4aaa6ce..3b1f19532 100644
--- a/src/store/fs/meta/proto.rs
+++ b/src/store/fs/meta/proto.rs
@@ -45,7 +45,7 @@ pub struct GetResult {
 /// of the inline data and inline outboard tables if necessary.
 #[derive(Debug)]
 pub struct Dump {
-    pub tx: oneshot::Sender<anyhow::Result<()>>,
+    pub tx: oneshot::Sender<Result<()>>,
     pub span: Span,
 }
diff --git a/src/store/mem.rs b/src/store/mem.rs
index 918338efc..1d1583ac3 100644
--- a/src/store/mem.rs
+++ b/src/store/mem.rs
@@ -28,6 +28,7 @@ use bao_tree::{
 };
 use bytes::Bytes;
 use irpc::channel::mpsc;
+use n0_error::{Result, StdResultExt};
 use n0_future::{
     future::yield_now,
     task::{JoinError, JoinSet},
@@ -105,7 +106,7 @@ impl Default for MemStore {
 #[derive(derive_more::From)]
 enum TaskResult {
     Unit(()),
-    Import(anyhow::Result<ImportEntry>),
+    Import(Result<ImportEntry>),
     Scope(Scope),
 }
@@ -445,7 +446,7 @@ impl Actor {
         tx.send(tt).await.ok();
     }

-    async fn finish_import(&mut self, res: anyhow::Result<ImportEntry>) {
+    async fn finish_import(&mut self, res: Result<ImportEntry>) {
         let import_data = match res {
             Ok(entry) => entry,
             Err(e) => {
@@ -709,7 +710,7 @@ async fn import_bytes(
     scope: Scope,
     format: BlobFormat,
     tx: mpsc::Sender<AddProgressItem>,
-) -> anyhow::Result<ImportEntry> {
+) -> Result<ImportEntry> {
     tx.send(AddProgressItem::Size(data.len() as u64)).await?;
     tx.send(AddProgressItem::CopyDone).await?;
     let outboard = PreOrderMemOutboard::create(&data, IROH_BLOCK_SIZE);
@@ -727,7 +728,7 @@ async fn import_byte_stream(
     format: BlobFormat,
     mut rx: mpsc::Receiver<ImportByteStreamUpdate>,
     tx: mpsc::Sender<AddProgressItem>,
-) -> anyhow::Result<ImportEntry> {
+) -> Result<ImportEntry> {
     let mut res = Vec::new();
     loop {
         match rx.recv().await {
@@ -755,16 +756,16 @@ async fn import_byte_stream(
 }

 #[cfg(wasm_browser)]
-async fn import_path(cmd: ImportPathMsg) -> anyhow::Result<ImportEntry> {
+async fn import_path(cmd: ImportPathMsg) -> Result<ImportEntry> {
     let _: ImportPathRequest = cmd.inner;
-    Err(anyhow::anyhow!(
+    Err(n0_error::anyerr!(
         "import_path is not supported in the browser"
     ))
 }

 #[instrument(skip_all, fields(path = %cmd.path.display()))]
 #[cfg(not(wasm_browser))]
-async fn import_path(cmd: ImportPathMsg) -> anyhow::Result<ImportEntry> {
+async fn import_path(cmd: ImportPathMsg) -> Result<ImportEntry> {
     use tokio::io::AsyncReadExt;
     let ImportPathMsg {
         inner:
@@ -1044,7 +1045,7 @@ impl BaoFileStorageSubscriber {
     /// Forward observed *values* to the given sender
     ///
     /// Returns an error if sending fails, or if the last sender is dropped
-    pub async fn forward(mut self, mut tx: mpsc::Sender<Bitfield>) -> anyhow::Result<()> {
+    pub async fn forward(mut self, mut tx: mpsc::Sender<Bitfield>) -> Result<()> {
         let value = self.receiver.borrow().bitfield();
         tx.send(value).await?;
         loop {
@@ -1058,7 +1059,7 @@ impl BaoFileStorageSubscriber {
     ///
     /// Returns an error if sending fails, or if the last sender is dropped
     #[allow(dead_code)]
-    pub async fn forward_delta(mut self, mut tx: mpsc::Sender<Bitfield>) -> anyhow::Result<()> {
+    pub async fn forward_delta(mut self, mut tx: mpsc::Sender<Bitfield>) -> Result<()> {
         let value = self.receiver.borrow().bitfield();
         let mut old = value.clone();
         tx.send(value).await?;
@@ -1074,13 +1075,13 @@ impl BaoFileStorageSubscriber {
         }
     }

-    async fn update_or_closed(&mut self, tx: &mut mpsc::Sender<Bitfield>) -> anyhow::Result<()> {
+    async fn update_or_closed(&mut self, tx: &mut mpsc::Sender<Bitfield>) -> Result<()> {
         tokio::select! {
             _ = tx.closed() => {
                 // the sender is closed, we are done
                 Err(n0_error::e!(irpc::channel::SendError::ReceiverClosed).into())
             }
-            e = self.receiver.changed() => Ok(e?),
+            e = self.receiver.changed() => Ok(e.anyerr()?),
         }
     }
 }
diff --git a/src/store/readonly_mem.rs b/src/store/readonly_mem.rs
index 649acdcbc..7fc734827 100644
--- a/src/store/readonly_mem.rs
+++ b/src/store/readonly_mem.rs
@@ -96,7 +96,7 @@ impl Actor {
     async fn handle_command(&mut self, cmd: Command) -> Option> {
         match cmd {
             Command::ImportBao(ImportBaoMsg { tx, .. }) => {
-                tx.send(Err(api::Error::Io(io::Error::other(
+                tx.send(Err(api::Error::from(io::Error::other(
                     "import not supported",
                 ))))
                 .await
diff --git a/src/store/util.rs b/src/store/util.rs
index 03630a6fc..b4d9caa26 100644
--- a/src/store/util.rs
+++ b/src/store/util.rs
@@ -417,17 +417,19 @@ impl bao_tree::io::mixed::Sender for BaoTreeSender {
 #[cfg(feature = "fs-store")]
 pub mod tests {
     use bao_tree::{io::outboard::PreOrderMemOutboard, ChunkRanges};
+    use n0_error::{Result, StdResultExt};

     use crate::{hash::Hash, store::IROH_BLOCK_SIZE};

     /// Create n0 flavoured bao. Note that this can be used to request ranges below a chunk group size,
     /// which can not be exported via bao because we don't store hashes below the chunk group level.
-    pub fn create_n0_bao(data: &[u8], ranges: &ChunkRanges) -> anyhow::Result<(Hash, Vec<u8>)> {
+    pub fn create_n0_bao(data: &[u8], ranges: &ChunkRanges) -> Result<(Hash, Vec<u8>)> {
        let outboard = PreOrderMemOutboard::create(data, IROH_BLOCK_SIZE);
        let mut encoded = Vec::new();
        let size = data.len() as u64;
        encoded.extend_from_slice(&size.to_le_bytes());
-        bao_tree::io::sync::encode_ranges_validated(data, &outboard, ranges, &mut encoded)?;
+        bao_tree::io::sync::encode_ranges_validated(data, &outboard, ranges, &mut encoded)
+            .anyerr()?;
         Ok((outboard.root.into(), encoded))
     }
 }
diff --git a/src/test.rs b/src/test.rs
index 3ecb1c87a..f6c05f623 100644
--- a/src/test.rs
+++ b/src/test.rs
@@ -14,7 +14,7 @@ pub async fn create_random_blobs(
     num_blobs: usize,
     blob_size: impl Fn(usize, &mut R) -> usize,
     mut rand: R,
-) -> anyhow::Result> {
+) -> n0_error::Result> {
     // generate sizes and seeds, non-parrallelized so it is deterministic
     let sizes = (0..num_blobs)
         .map(|n| (blob_size(n, &mut rand), rand.random::()))
         .collect::>();
@@ -39,7 +39,7 @@ pub async fn add_hash_sequences(
     num_seqs: usize,
     seq_size: impl Fn(usize, &mut R) -> usize,
     mut rand: R,
-) -> anyhow::Result> {
+) -> n0_error::Result> {
     let infos = stream::iter(0..num_seqs)
         .then(|n| {
             let size = seq_size(n, &mut rand);
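
Where no structured error type exists, the migration reaches for `anyerr!`, which builds an ad-hoc `AnyError` from a message, taking over the role of `anyhow::anyhow!` (see `import_path` above). A sketch with an illustrative message:

    use n0_error::{anyerr, Result};

    fn unsupported() -> Result<()> {
        // plays the role `anyhow::anyhow!` played before the migration
        Err(anyerr!("operation not supported in this store"))
    }
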
diff --git a/src/ticket.rs b/src/ticket.rs
index 55ef00ae5..dbca0c7c1 100644
--- a/src/ticket.rs
+++ b/src/ticket.rs
@@ -1,9 +1,9 @@
 //! Tickets for blobs.
 use std::{collections::BTreeSet, net::SocketAddr, str::FromStr};

-use anyhow::Result;
 use iroh::{EndpointAddr, EndpointId, RelayUrl};
 use iroh_tickets::{ParseError, Ticket};
+use n0_error::Result;
 use serde::{Deserialize, Serialize};

 use crate::{BlobFormat, Hash, HashAndFormat};
diff --git a/src/util/connection_pool.rs b/src/util/connection_pool.rs
index fd66b4531..eda360edf 100644
--- a/src/util/connection_pool.rs
+++ b/src/util/connection_pool.rs
@@ -23,11 +23,11 @@ use iroh::{
     endpoint::{ConnectError, Connection},
     Endpoint, EndpointId,
 };
+use n0_error::{e, stack_error};
 use n0_future::{
     future::{self},
     FuturesUnordered, MaybeFuture, Stream, StreamExt,
 };
-use snafu::Snafu;
 use tokio::sync::{
     mpsc::{self, error::SendError as TokioSendError},
     oneshot, Notify,
@@ -108,45 +108,49 @@ impl ConnectionRef {
 ///
 /// This includes the normal iroh connection errors as well as pool specific
 /// errors such as timeouts and connection limits.
-#[derive(Debug, Clone, Snafu)]
-#[snafu(module)]
+#[stack_error(derive, add_meta)]
+#[derive(Clone)]
 pub enum PoolConnectError {
     /// Connection pool is shut down
-    Shutdown,
+    #[error("Connection pool is shut down")]
+    Shutdown {},
     /// Timeout during connect
-    Timeout,
+    #[error("Timeout during connect")]
+    Timeout {},
     /// Too many connections
-    TooManyConnections,
+    #[error("Too many connections")]
+    TooManyConnections {},
     /// Error during connect
+    #[error(transparent)]
     ConnectError { source: Arc<ConnectError> },
     /// Error during on_connect callback
-    OnConnectError { source: Arc<io::Error> },
+    #[error(transparent)]
+    OnConnectError {
+        #[error(std_err)]
+        source: Arc<io::Error>,
+    },
 }

 impl From<ConnectError> for PoolConnectError {
     fn from(e: ConnectError) -> Self {
-        PoolConnectError::ConnectError {
-            source: Arc::new(e),
-        }
+        e!(PoolConnectError::ConnectError, Arc::new(e))
     }
 }

 impl From<io::Error> for PoolConnectError {
     fn from(e: io::Error) -> Self {
-        PoolConnectError::OnConnectError {
-            source: Arc::new(e),
-        }
+        e!(PoolConnectError::OnConnectError, Arc::new(e))
     }
 }

 /// Error when calling a fn on the [`ConnectionPool`].
 ///
 /// The only thing that can go wrong is that the connection pool is shut down.
-#[derive(Debug, Snafu)]
-#[snafu(module)]
+#[stack_error(derive, add_meta)]
 pub enum ConnectionPoolError {
     /// The connection pool has been shut down
-    Shutdown,
+    #[error("The connection pool has been shut down")]
+    Shutdown {},
 }

 enum ActorMessage {
@@ -195,7 +199,7 @@ impl Context {
             // Connect to the node
             let state = n0_future::time::timeout(context.options.connect_timeout, conn_fut)
                 .await
-                .map_err(|_| PoolConnectError::Timeout)
+                .map_err(|_| e!(PoolConnectError::Timeout))
                 .and_then(|r| r);
             let conn_close = match &state {
                 Ok(conn) => {
@@ -360,7 +364,9 @@ impl Actor {
                 trace!("removing oldest idle connection {}", idle);
                 self.connections.remove(&idle);
             } else {
-                msg.tx.send(Err(PoolConnectError::TooManyConnections)).ok();
+                msg.tx
+                    .send(Err(e!(PoolConnectError::TooManyConnections)))
+                    .ok();
                 return;
             }
         }
@@ -437,8 +443,8 @@ impl ConnectionPool {
         self.tx
             .send(ActorMessage::RequestRef(RequestRef { id, tx }))
             .await
-            .map_err(|_| PoolConnectError::Shutdown)?;
-        rx.await.map_err(|_| PoolConnectError::Shutdown)?
+            .map_err(|_| e!(PoolConnectError::Shutdown))?;
+        rx.await.map_err(|_| e!(PoolConnectError::Shutdown))?
     }

     /// Close an existing connection, if it exists
@@ -449,7 +455,7 @@ impl ConnectionPool {
         self.tx
             .send(ActorMessage::ConnectionShutdown { id })
             .await
-            .map_err(|_| ConnectionPoolError::Shutdown)?;
+            .map_err(|_| e!(ConnectionPoolError::Shutdown))?;
         Ok(())
     }

@@ -463,7 +469,7 @@ impl ConnectionPool {
         self.tx
             .send(ActorMessage::ConnectionIdle { id })
             .await
-            .map_err(|_| ConnectionPoolError::Shutdown)?;
+            .map_err(|_| e!(ConnectionPoolError::Shutdown))?;
         Ok(())
     }
 }
@@ -545,8 +551,8 @@ mod tests {
         protocol::{AcceptError, ProtocolHandler, Router},
         Endpoint, EndpointAddr, EndpointId, RelayMode, SecretKey, TransportAddr, Watcher,
     };
+    use n0_error::{AnyError, Result, StdResultExt};
     use n0_future::{io, stream, BufferedStreamExt, StreamExt};
-    use n0_snafu::ResultExt;
     use testresult::TestResult;
     use tracing::trace;

@@ -580,14 +586,14 @@ mod tests {
         }
     }

-    async fn echo_client(conn: &Connection, text: &[u8]) -> n0_snafu::Result<Vec<u8>> {
+    async fn echo_client(conn: &Connection, text: &[u8]) -> Result<Vec<u8>> {
         let conn_id = conn.stable_id();
         let id = conn.remote_id();
         trace!(%id, %conn_id, "Sending echo request");
-        let (mut send, mut recv) = conn.open_bi().await.e()?;
-        send.write_all(text).await.e()?;
-        send.finish().e()?;
-        let response = recv.read_to_end(1000).await.e()?;
+        let (mut send, mut recv) = conn.open_bi().await.anyerr()?;
+        send.write_all(text).await.anyerr()?;
+        send.finish().anyerr()?;
+        let response = recv.read_to_end(1000).await.anyerr()?;
         trace!(%id, %conn_id, "Received echo response");
         Ok(response)
     }
@@ -645,7 +651,7 @@ mod tests {
             &self,
             id: EndpointId,
             text: Vec<u8>,
-        ) -> Result<Result<(usize, Vec<u8>), n0_snafu::Error>, PoolConnectError> {
+        ) -> Result<Result<(usize, Vec<u8>), AnyError>, PoolConnectError> {
             let conn = self.pool.get_or_connect(id).await?;
             let id = conn.stable_id();
             match echo_client(&conn, &text).await {
                 Ok(res) =>
@@ -685,7 +691,7 @@ mod tests {
         // trying to connect to an id for which we have info, but the other
         // end is not listening, will lead to a timeout.
         let res = client.echo(non_listening, b"Hello, world!".to_vec()).await;
-        assert!(matches!(res, Err(PoolConnectError::Timeout)));
+        assert!(matches!(res, Err(PoolConnectError::Timeout { .. })));
     }
     Ok(())
 }
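
Finally, note that `e!` also covers source-carrying variants: the `From` impls above wrap the underlying error in an `Arc` and pass it as the second argument. A sketch of the same shape; `classify` is a hypothetical helper, everything else is from this diff:

    use std::sync::Arc;
    use n0_error::e;

    fn classify(res: std::io::Result<()>) -> Result<(), PoolConnectError> {
        // mirrors `impl From<io::Error> for PoolConnectError` above
        res.map_err(|err| e!(PoolConnectError::OnConnectError, Arc::new(err)))
    }
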