
iroh_blobs/get.rs

//! The low-level client-side API
//!
//! Note that while using this API directly is fine, a simpler way to get data
//! into a store is to use the [`crate::api::remote`] API, in particular the
//! [`crate::api::remote::Remote::fetch`] function to download data to your
//! local store.
//!
//! To get data, create a connection using an [`iroh::Endpoint`].
//!
//! Create a [`crate::protocol::GetRequest`] describing the data you want to get.
//!
//! Then create a state machine using [`fsm::start`] and
//! drive it to completion by calling `next` on each state.
//!
//! For some states you have to provide additional arguments when calling `next`,
//! or you can choose to finish early.
//!
//! [iroh]: https://docs.rs/iroh
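//!
//! Below is a minimal sketch of the full flow for a single blob. It assumes an
//! already established [`iroh::endpoint::Connection`] `conn`, a known `hash`,
//! and a `GetRequest::blob`-style constructor; error handling is elided.
//!
//! ```ignore
//! use iroh_blobs::{
//!     get::fsm::{self, ConnectedNext, EndBlobNext, RequestCounters},
//!     protocol::GetRequest,
//! };
//!
//! // describe what we want: the complete blob for `hash`
//! let request = GetRequest::blob(hash);
//! // open a bidi stream on the connection and send the request
//! let connected = fsm::start(conn, request, RequestCounters::default())
//!     .next()
//!     .await?;
//! // a single-blob request has ranges for the root, so we start at the root
//! let ConnectedNext::StartRoot(at_root) = connected.next().await? else {
//!     unreachable!("request includes the root blob");
//! };
//! // read the size header, then concatenate all content into a vec
//! let (at_end, data) = at_root.next().concatenate_into_vec().await?;
//! // no children were requested, so the next state is Closing
//! let EndBlobNext::Closing(at_closing) = at_end.next() else {
//!     unreachable!("no children requested");
//! };
//! // finish the request and collect transfer statistics
//! let stats = at_closing.next().await?;
//! ```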
use std::{
    fmt::{self, Debug},
    time::Duration,
};

use bao_tree::{io::fsm::BaoContentItem, ChunkNum};
use fsm::RequestCounters;
use n0_error::Result;
use n0_future::time::Instant;
use serde::{Deserialize, Serialize};
use tracing::{debug, error};

use crate::{
    protocol::ChunkRangesSeq,
    store::IROH_BLOCK_SIZE,
    util::{RecvStream, SendStream},
    Hash,
};

mod error;
pub mod request;
pub use error::{GetError, GetResult};

type DefaultReader = iroh::endpoint::RecvStream;
type DefaultWriter = iroh::endpoint::SendStream;

/// A pair of receive and send streams for a single request, together with the
/// id of the connection they belong to and the time the request was started.
pub struct StreamPair<R: RecvStream = DefaultReader, W: SendStream = DefaultWriter> {
    /// The id of the connection the streams belong to
    pub connection_id: u64,
    /// The time the stream pair was created
    pub t0: Instant,
    /// The receive stream
    pub recv: R,
    /// The send stream
    pub send: W,
}

impl<R: RecvStream, W: SendStream> StreamPair<R, W> {
    /// Create a new stream pair, recording the current time as `t0`
    pub fn new(connection_id: u64, recv: R, send: W) -> Self {
        Self {
            t0: Instant::now(),
            recv,
            send,
            connection_id,
        }
    }
}

/// Stats about the transfer.
#[derive(
    Debug,
    Default,
    Clone,
    PartialEq,
    Eq,
    Serialize,
    Deserialize,
    derive_more::Deref,
    derive_more::DerefMut,
)]
pub struct Stats {
    /// Counters
    #[deref]
    #[deref_mut]
    pub counters: RequestCounters,
    /// The time it took to transfer the data
    pub elapsed: Duration,
}

impl Stats {
    /// Transfer rate in megabits per second
    pub fn mbits(&self) -> f64 {
        let data_len_bit = self.total_bytes_read() * 8;
        data_len_bit as f64 / (1000. * 1000.) / self.elapsed.as_secs_f64()
    }

    /// Total number of bytes read, payload and overhead combined
    pub fn total_bytes_read(&self) -> u64 {
        self.payload_bytes_read + self.other_bytes_read
    }

    /// Add the counters and elapsed time of `that` to `self`
    pub fn combine(&mut self, that: &Stats) {
        self.payload_bytes_written += that.payload_bytes_written;
        self.other_bytes_written += that.other_bytes_written;
        self.payload_bytes_read += that.payload_bytes_read;
        self.other_bytes_read += that.other_bytes_read;
        self.elapsed += that.elapsed;
    }
}

/// Finite state machine for get responses.
///
/// This is the low-level API for getting data from a peer.
#[doc = include_str!("../docs/img/get_machine.drawio.svg")]
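/// A hedged sketch of the child-reading loop, assuming a hypothetical
/// `hash_at(offset)` lookup that yields the hash for a given child offset;
/// error handling is elided.
///
/// ```ignore
/// // `at_child` is a mutable `AtStartChild`, e.g. obtained from `start_get_many`
/// let stats = loop {
///     // the offset tells us which hash to verify this child against
///     let hash = hash_at(at_child.offset());
///     // read the size header and the content, discarding the data
///     let at_end = at_child.next(hash).drain().await?;
///     match at_end.next() {
///         // the response contains more children
///         EndBlobNext::MoreChildren(next) => at_child = next,
///         // all requested children received, close the stream and collect stats
///         EndBlobNext::Closing(at_closing) => break at_closing.next().await?,
///     }
/// };
/// ```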
pub mod fsm {
    use std::{io, result};

    use bao_tree::{
        io::fsm::{OutboardMut, ResponseDecoder, ResponseDecoderNext},
        BaoTree, ChunkRanges, TreeNode,
    };
    use derive_more::From;
    use iroh::endpoint::Connection;
    use iroh_io::AsyncSliceWriter;
    use n0_error::{e, stack_error, AnyError};

    use super::*;
    use crate::{
        protocol::{
            GetManyRequest, GetRequest, NonEmptyRequestRangeSpecIter, Request, MAX_MESSAGE_SIZE,
        },
        util::{RecvStream, RecvStreamAsyncStreamReader, SendStream},
    };

    self_cell::self_cell! {
        struct RangesIterInner {
            owner: ChunkRangesSeq,
            #[not_covariant]
            dependent: NonEmptyRequestRangeSpecIter,
        }
    }

    /// The entry point of the get response machine
    pub fn start(
        connection: Connection,
        request: GetRequest,
        counters: RequestCounters,
    ) -> AtInitial {
        AtInitial::new(connection, request, counters)
    }

    /// Start with a get many request. Todo: turn this into distinct states.
    pub async fn start_get_many(
        connection: Connection,
        request: GetManyRequest,
        counters: RequestCounters,
    ) -> std::result::Result<Result<AtStartChild, AtClosing>, GetError> {
        let start = Instant::now();
        let (mut writer, reader) = connection
            .open_bi()
            .await
            .map_err(|e| e!(InitialNextError::Open, e.into()))?;
        let request = Request::GetMany(request);
        let request_bytes = postcard::to_stdvec(&request)
            .map_err(|source| e!(GetError::BadRequest, AnyError::from_std(source)))?;
        writer
            .send_bytes(request_bytes.into())
            .await
            .map_err(|source| e!(ConnectedNextError::Write, source))?;
        let Request::GetMany(request) = request else {
            unreachable!();
        };
        let mut ranges_iter = RangesIter::new(request.ranges.clone());
        let first_item = ranges_iter.next();
        let misc = Box::new(Misc {
            counters,
            start,
            ranges_iter,
        });
        Ok(match first_item {
            Some((child_offset, child_ranges)) => Ok(AtStartChild {
                ranges: child_ranges,
                reader,
                misc,
                offset: child_offset,
            }),
            None => Err(AtClosing::new(misc, reader, true)),
        })
    }

    /// Owned iterator for the ranges in a request
    ///
    /// We need an owned iterator for an fsm-style API, otherwise we would have
    /// to drag a lifetime around every single state.
    struct RangesIter(RangesIterInner);

    impl fmt::Debug for RangesIter {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_struct("RangesIter").finish()
        }
    }

    impl RangesIter {
        pub fn new(owner: ChunkRangesSeq) -> Self {
            Self(RangesIterInner::new(owner, |owner| {
                owner.iter_non_empty_infinite()
            }))
        }

        pub fn offset(&self) -> u64 {
            self.0.with_dependent(|_owner, iter| iter.offset())
        }
    }

    impl Iterator for RangesIter {
        type Item = (u64, ChunkRanges);

        fn next(&mut self) -> Option<Self::Item> {
            self.0.with_dependent_mut(|_owner, iter| {
                iter.next().map(|(offset, ranges)| (offset, ranges.clone()))
            })
        }
    }

    /// Initial state of the get response machine
    #[derive(Debug)]
    pub struct AtInitial {
        connection: Connection,
        request: GetRequest,
        counters: RequestCounters,
    }

    impl AtInitial {
        /// Create a new get response
        ///
        /// - `connection` is an existing connection
        /// - `request` is the request to be sent
        /// - `counters` are the initial request counters
        pub fn new(connection: Connection, request: GetRequest, counters: RequestCounters) -> Self {
            Self {
                connection,
                request,
                counters,
            }
        }

        /// Initiate a new bidi stream to use for the get response
        pub async fn next(self) -> Result<AtConnected, InitialNextError> {
            let start = Instant::now();
            let (writer, reader) = self
                .connection
                .open_bi()
                .await
                .map_err(|e| e!(InitialNextError::Open, e.into()))?;
            Ok(AtConnected {
                start,
                reader,
                writer,
                request: self.request,
                counters: self.counters,
            })
        }
    }

    /// Error that you can get from [`AtInitial::next`]
    #[stack_error(derive, add_meta, from_sources)]
    pub enum InitialNextError {
        #[error("open: {source}")]
        Open {
            #[error(std_err)]
            source: io::Error,
        },
    }

    /// State of the get response machine after the handshake has been sent
    #[derive(Debug)]
    pub struct AtConnected<R: RecvStream = DefaultReader, W: SendStream = DefaultWriter> {
        start: Instant,
        reader: R,
        writer: W,
        request: GetRequest,
        counters: RequestCounters,
    }

    /// Possible next states after the handshake has been sent
    #[derive(Debug, From)]
    pub enum ConnectedNext<R: RecvStream = DefaultReader> {
        /// First response is either a collection or a single blob
        StartRoot(AtStartRoot<R>),
        /// First response is a child
        StartChild(AtStartChild<R>),
        /// Request is empty
        Closing(AtClosing<R>),
    }

    /// Error that you can get from [`AtConnected::next`]
    #[stack_error(derive, add_meta)]
    pub enum ConnectedNextError {
        /// Error when serializing the request
        #[error("postcard ser: {source}")]
        PostcardSer {
            #[error(std_err)]
            source: postcard::Error,
        },
        /// The serialized request is too long to be sent
        #[error("request too big")]
        RequestTooBig {},
        /// Error when writing the request to the [`SendStream`].
        #[error("write: {source}")]
        Write {
            #[error(std_err)]
            source: io::Error,
        },
    }

    impl<R: RecvStream, W: SendStream> AtConnected<R, W> {
        pub fn new(
            start: Instant,
            reader: R,
            writer: W,
            request: GetRequest,
            counters: RequestCounters,
        ) -> Self {
            Self {
                start,
                reader,
                writer,
                request,
                counters,
            }
        }

        /// Send the request and move to the next state
        ///
        /// The next state will be either `StartRoot` or `StartChild`, depending on
        /// whether the request includes ranges for the root blob (offset 0) or not.
        ///
        /// If the request is empty, this can also move directly to `Closing`.
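        ///
        /// A hedged sketch of handling the possible next states (error handling
        /// elided):
        ///
        /// ```ignore
        /// match connected.next().await? {
        ///     // the request includes ranges for the root blob (offset 0)
        ///     ConnectedNext::StartRoot(at_root) => { /* at_root.next() reads the size header */ }
        ///     // the request skips the root and starts at a child offset
        ///     ConnectedNext::StartChild(at_child) => { /* provide the child hash via at_child.next(hash) */ }
        ///     // the request was empty, just close the stream and collect stats
        ///     ConnectedNext::Closing(at_closing) => { let _stats = at_closing.next().await?; }
        /// }
        /// ```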
        pub async fn next(self) -> Result<ConnectedNext<R>, ConnectedNextError> {
            let Self {
                start,
                reader,
                mut writer,
                mut request,
                mut counters,
            } = self;
            // 1. Send Request
            counters.other_bytes_written += {
                debug!("sending request");
                let wrapped = Request::Get(request);
                let request_bytes = postcard::to_stdvec(&wrapped)
                    .map_err(|source| e!(ConnectedNextError::PostcardSer, source))?;
                let Request::Get(x) = wrapped else {
                    unreachable!();
                };
                request = x;

                if request_bytes.len() > MAX_MESSAGE_SIZE {
                    return Err(e!(ConnectedNextError::RequestTooBig));
                }

                // write the request itself
                let len = request_bytes.len() as u64;
                writer
                    .send_bytes(request_bytes.into())
                    .await
                    .map_err(|source| e!(ConnectedNextError::Write, source))?;
                writer
                    .sync()
                    .await
                    .map_err(|source| e!(ConnectedNextError::Write, source))?;
                len
            };

            // 2. Finish writing before expecting a response
            drop(writer);

            let hash = request.hash;
            let ranges_iter = RangesIter::new(request.ranges);
            // this is in a box so we don't have to memcpy it on every state transition
            let mut misc = Box::new(Misc {
                counters,
                start,
                ranges_iter,
            });
            Ok(match misc.ranges_iter.next() {
                Some((offset, ranges)) => {
                    if offset == 0 {
                        AtStartRoot {
                            reader,
                            ranges,
                            misc,
                            hash,
                        }
                        .into()
                    } else {
                        AtStartChild {
                            reader,
                            ranges,
                            misc,
                            offset,
                        }
                        .into()
                    }
                }
                None => AtClosing::new(misc, reader, true).into(),
            })
        }
    }

    /// State of the get response when we start reading a collection
    #[derive(Debug)]
    pub struct AtStartRoot<R: RecvStream = DefaultReader> {
        ranges: ChunkRanges,
        reader: R,
        misc: Box<Misc>,
        hash: Hash,
    }

    /// State of the get response when we start reading a child
    #[derive(Debug)]
    pub struct AtStartChild<R: RecvStream = DefaultReader> {
        ranges: ChunkRanges,
        reader: R,
        misc: Box<Misc>,
        offset: u64,
    }

    impl<R: RecvStream> AtStartChild<R> {
        /// The offset of the child we are currently reading
        ///
        /// This must be used to determine the hash needed to call next.
        /// If this is larger than the number of children in the collection,
        /// you can call finish to stop reading the response.
        pub fn offset(&self) -> u64 {
            self.offset
        }

        /// The ranges we have requested for the child
        pub fn ranges(&self) -> &ChunkRanges {
            &self.ranges
        }

        /// Go into the next state, reading the header
        ///
        /// This requires passing in the hash of the child for validation
        pub fn next(self, hash: Hash) -> AtBlobHeader<R> {
            AtBlobHeader {
                reader: self.reader,
                ranges: self.ranges,
                misc: self.misc,
                hash,
            }
        }

        /// Finish the get response without reading further
        ///
        /// This is used if you know that there are no more children from having
        /// read the collection, or when you want to stop reading the response
        /// early.
        pub fn finish(self) -> AtClosing<R> {
            AtClosing::new(self.misc, self.reader, false)
        }
    }

    impl<R: RecvStream> AtStartRoot<R> {
        /// The ranges we have requested for the root blob
        pub fn ranges(&self) -> &ChunkRanges {
            &self.ranges
        }

        /// Hash of the root blob
        pub fn hash(&self) -> Hash {
            self.hash
        }

        /// Go into the next state, reading the header
        ///
        /// For the collection we already know the hash, since it was part of the request
        pub fn next(self) -> AtBlobHeader<R> {
            AtBlobHeader {
                reader: self.reader,
                ranges: self.ranges,
                hash: self.hash,
                misc: self.misc,
            }
        }

        /// Finish the get response without reading further
        pub fn finish(self) -> AtClosing<R> {
            AtClosing::new(self.misc, self.reader, false)
        }
    }

    /// State before reading a size header
    #[derive(Debug)]
    pub struct AtBlobHeader<R: RecvStream = DefaultReader> {
        ranges: ChunkRanges,
        reader: R,
        misc: Box<Misc>,
        hash: Hash,
    }

    /// Error that you can get from [`AtBlobHeader::next`]
    #[stack_error(derive, add_meta)]
    pub enum AtBlobHeaderNextError {
        /// Eof when reading the size header
        ///
        /// This indicates that the provider does not have the requested data.
        #[error("not found")]
        NotFound {},
        /// Generic io error
        #[error("io: {source}")]
        Read {
            #[error(std_err)]
            source: io::Error,
        },
    }

    impl From<AtBlobHeaderNextError> for io::Error {
        fn from(cause: AtBlobHeaderNextError) -> Self {
            match cause {
                AtBlobHeaderNextError::NotFound { .. } => {
                    io::Error::new(io::ErrorKind::UnexpectedEof, cause)
                }
                AtBlobHeaderNextError::Read { source, .. } => source,
            }
        }
    }

    impl<R: RecvStream> AtBlobHeader<R> {
        /// Read the size header, returning it and going into the `Content` state.
        pub async fn next(mut self) -> Result<(AtBlobContent<R>, u64), AtBlobHeaderNextError> {
            let mut size = [0; 8];
            self.reader.recv_exact(&mut size).await.map_err(|cause| {
                if cause.kind() == io::ErrorKind::UnexpectedEof {
                    e!(AtBlobHeaderNextError::NotFound)
                } else {
                    e!(AtBlobHeaderNextError::Read, cause)
                }
            })?;
            self.misc.other_bytes_read += 8;
            let size = u64::from_le_bytes(size);
            let stream = ResponseDecoder::new(
                self.hash.into(),
                self.ranges,
                BaoTree::new(size, IROH_BLOCK_SIZE),
                RecvStreamAsyncStreamReader::new(self.reader),
            );
            Ok((
                AtBlobContent {
                    stream,
                    misc: self.misc,
                },
                size,
            ))
        }

        /// Drain the response and throw away the result
        pub async fn drain(self) -> result::Result<AtEndBlob<R>, DecodeError> {
            let (content, _size) = self.next().await?;
            content.drain().await
        }

        /// Concatenate the entire response into a vec
        ///
        /// For a request that does not request the complete blob, this will just
        /// concatenate the ranges that were requested.
        pub async fn concatenate_into_vec(
            self,
        ) -> result::Result<(AtEndBlob<R>, Vec<u8>), DecodeError> {
            let (content, _size) = self.next().await?;
            content.concatenate_into_vec().await
        }

        /// Write the entire blob to a slice writer.
        pub async fn write_all<D: AsyncSliceWriter>(
            self,
            data: D,
        ) -> result::Result<AtEndBlob<R>, DecodeError> {
            let (content, _size) = self.next().await?;
            let res = content.write_all(data).await?;
            Ok(res)
        }

        /// Write the entire blob to a slice writer and to an optional outboard.
        ///
        /// The outboard is only written to if the blob is larger than a single
        /// chunk group.
        pub async fn write_all_with_outboard<D, O>(
            self,
            outboard: Option<O>,
            data: D,
        ) -> result::Result<AtEndBlob<R>, DecodeError>
        where
            D: AsyncSliceWriter,
            O: OutboardMut,
        {
            let (content, _size) = self.next().await?;
            let res = content.write_all_with_outboard(outboard, data).await?;
            Ok(res)
        }

        /// The hash of the blob we are reading.
        pub fn hash(&self) -> Hash {
            self.hash
        }

        /// The ranges we have requested for the current hash.
        pub fn ranges(&self) -> &ChunkRanges {
            &self.ranges
        }

        /// The current offset of the blob we are reading.
        pub fn offset(&self) -> u64 {
            self.misc.ranges_iter.offset()
        }
    }

    /// State while we are reading content
    #[derive(Debug)]
    pub struct AtBlobContent<R: RecvStream = DefaultReader> {
        stream: ResponseDecoder<RecvStreamAsyncStreamReader<R>>,
        misc: Box<Misc>,
    }

    /// Decode error that you can get once you have sent the request and are
    /// decoding the response, e.g. from [`AtBlobContent::next`].
    ///
    /// This is similar to [`bao_tree::io::DecodeError`], but takes into account
    /// that we are reading from a [`RecvStream`], so read errors will be
    /// propagated as [`DecodeError::Read`], containing an [`io::Error`].
    ///
    /// When the provider finds that it does not have a chunk that we requested,
    /// or that the chunk is invalid, it will stop sending data without producing
    /// an error. This is indicated by either the [`DecodeError::ParentNotFound`] or
    /// [`DecodeError::LeafNotFound`] variant, which can be used to detect that data
    /// is missing while the connection and the provider are otherwise healthy.
    ///
    /// The [`DecodeError::ParentHashMismatch`] and [`DecodeError::LeafHashMismatch`]
    /// variants indicate that the provider has sent us invalid data. A well-behaved
    /// provider should never do this, so this is an indication that the provider is
    /// not behaving correctly.
    #[non_exhaustive]
    #[stack_error(derive, add_meta)]
    pub enum DecodeError {
        /// A chunk was not found or invalid, so the provider stopped sending data
        #[error("not found")]
        ChunkNotFound {},
        /// A parent was not found or invalid, so the provider stopped sending data
        #[error("parent not found {node:?}")]
        ParentNotFound { node: TreeNode },
        /// A leaf was not found or invalid, so the provider stopped sending data
        #[error("chunk not found {num}")]
        LeafNotFound { num: ChunkNum },
        /// The hash of a parent did not match the expected hash
        #[error("parent hash mismatch: {node:?}")]
        ParentHashMismatch { node: TreeNode },
        /// The hash of a leaf did not match the expected hash
        #[error("leaf hash mismatch: {num}")]
        LeafHashMismatch { num: ChunkNum },
        /// Error when reading from the stream
        #[error("read: {source}")]
        Read {
            #[error(std_err)]
            source: io::Error,
        },
        /// Error when writing the received data, e.g. to the data or outboard writer
        #[error("io: {source}")]
        Write {
            #[error(std_err)]
            source: io::Error,
        },
    }

    impl DecodeError {
        pub(crate) fn leaf_hash_mismatch(num: ChunkNum) -> Self {
            e!(DecodeError::LeafHashMismatch { num })
        }
    }

    impl From<AtBlobHeaderNextError> for DecodeError {
        fn from(cause: AtBlobHeaderNextError) -> Self {
            match cause {
                AtBlobHeaderNextError::NotFound { .. } => e!(DecodeError::ChunkNotFound),
                AtBlobHeaderNextError::Read { source, .. } => e!(DecodeError::Read, source),
            }
        }
    }

    impl From<DecodeError> for io::Error {
        fn from(cause: DecodeError) -> Self {
            match cause {
                DecodeError::ParentNotFound { .. } => {
                    io::Error::new(io::ErrorKind::UnexpectedEof, cause)
                }
                DecodeError::LeafNotFound { .. } => {
                    io::Error::new(io::ErrorKind::UnexpectedEof, cause)
                }
                DecodeError::Read { source, .. } => source,
                DecodeError::Write { source, .. } => source,
                _ => io::Error::other(cause),
            }
        }
    }

    impl From<bao_tree::io::DecodeError> for DecodeError {
        fn from(value: bao_tree::io::DecodeError) -> Self {
            match value {
                bao_tree::io::DecodeError::ParentNotFound(node) => {
                    e!(DecodeError::ParentNotFound { node })
                }
                bao_tree::io::DecodeError::LeafNotFound(num) => {
                    e!(DecodeError::LeafNotFound { num })
                }
                bao_tree::io::DecodeError::ParentHashMismatch(node) => {
                    e!(DecodeError::ParentHashMismatch { node })
                }
                bao_tree::io::DecodeError::LeafHashMismatch(num) => {
                    e!(DecodeError::LeafHashMismatch { num })
                }
                bao_tree::io::DecodeError::Io(cause) => e!(DecodeError::Read, cause),
            }
        }
    }

    /// The next state after reading a content item
    #[derive(Debug, From)]
    pub enum BlobContentNext<R: RecvStream> {
        /// We expect more content
        More(
            (
                AtBlobContent<R>,
                result::Result<BaoContentItem, DecodeError>,
            ),
        ),
        /// We are done with this blob
        Done(AtEndBlob<R>),
    }

    impl<R: RecvStream> AtBlobContent<R> {
        /// Read the next item, either content, an error, or the end of the blob
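        ///
        /// A hedged sketch of manually driving the content loop, assuming a
        /// hypothetical `store(offset, bytes)` sink for the leaf data (error
        /// handling elided):
        ///
        /// ```ignore
        /// // `content` is a mutable `AtBlobContent`
        /// let at_end = loop {
        ///     match content.next().await {
        ///         BlobContentNext::More((next, item)) => {
        ///             if let BaoContentItem::Leaf(leaf) = item? {
        ///                 // leaf.data holds verified payload starting at leaf.offset
        ///                 store(leaf.offset, &leaf.data);
        ///             }
        ///             content = next;
        ///         }
        ///         BlobContentNext::Done(at_end) => break at_end,
        ///     }
        /// };
        /// ```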
        pub async fn next(self) -> BlobContentNext<R> {
            match self.stream.next().await {
                ResponseDecoderNext::More((stream, res)) => {
                    let mut next = Self { stream, ..self };
                    let res = res.map_err(DecodeError::from);
                    match &res {
                        Ok(BaoContentItem::Parent(_)) => {
                            next.misc.other_bytes_read += 64;
                        }
                        Ok(BaoContentItem::Leaf(leaf)) => {
                            next.misc.payload_bytes_read += leaf.data.len() as u64;
                        }
                        _ => {}
                    }
                    BlobContentNext::More((next, res))
                }
                ResponseDecoderNext::Done(stream) => BlobContentNext::Done(AtEndBlob {
                    stream: stream.into_inner(),
                    misc: self.misc,
                }),
            }
        }

        /// The geometry of the tree we are currently reading.
        pub fn tree(&self) -> bao_tree::BaoTree {
            self.stream.tree()
        }

        /// The hash of the blob we are reading.
        pub fn hash(&self) -> Hash {
            (*self.stream.hash()).into()
        }

        /// The current offset of the blob we are reading.
        pub fn offset(&self) -> u64 {
            self.misc.ranges_iter.offset()
        }

        /// Current stats
        pub fn stats(&self) -> Stats {
            Stats {
                counters: self.misc.counters,
                elapsed: self.misc.start.elapsed(),
            }
        }

        /// Drain the response and throw away the result
        pub async fn drain(self) -> result::Result<AtEndBlob<R>, DecodeError> {
            let mut content = self;
            loop {
                match content.next().await {
                    BlobContentNext::More((content1, res)) => {
                        let _ = res?;
                        content = content1;
                    }
                    BlobContentNext::Done(end) => {
                        break Ok(end);
                    }
                }
            }
        }

        /// Concatenate the entire response into a vec
        pub async fn concatenate_into_vec(
            self,
        ) -> result::Result<(AtEndBlob<R>, Vec<u8>), DecodeError> {
            let mut res = Vec::with_capacity(1024);
            let mut curr = self;
            let done = loop {
                match curr.next().await {
                    BlobContentNext::More((next, data)) => {
                        if let BaoContentItem::Leaf(leaf) = data? {
                            res.extend_from_slice(&leaf.data);
                        }
                        curr = next;
                    }
                    BlobContentNext::Done(done) => {
                        // we are done with this blob
                        break done;
                    }
                }
            };
            Ok((done, res))
        }

        /// Write the entire blob to a slice writer and to an optional outboard.
        ///
        /// The outboard is only written to if the blob is larger than a single
        /// chunk group.
        pub async fn write_all_with_outboard<D, O>(
            self,
            mut outboard: Option<O>,
            mut data: D,
        ) -> result::Result<AtEndBlob<R>, DecodeError>
        where
            D: AsyncSliceWriter,
            O: OutboardMut,
        {
            let mut content = self;
            loop {
                match content.next().await {
                    BlobContentNext::More((content1, item)) => {
                        content = content1;
                        match item? {
                            BaoContentItem::Parent(parent) => {
                                if let Some(outboard) = outboard.as_mut() {
                                    outboard
                                        .save(parent.node, &parent.pair)
                                        .await
                                        .map_err(|e| e!(DecodeError::Write, e))?;
                                }
                            }
                            BaoContentItem::Leaf(leaf) => {
                                data.write_bytes_at(leaf.offset, leaf.data)
                                    .await
                                    .map_err(|e| e!(DecodeError::Write, e))?;
                            }
                        }
                    }
                    BlobContentNext::Done(end) => {
                        return Ok(end);
                    }
                }
            }
        }

        /// Write the entire blob to a slice writer.
        pub async fn write_all<D>(self, mut data: D) -> result::Result<AtEndBlob<R>, DecodeError>
        where
            D: AsyncSliceWriter,
        {
            let mut content = self;
            loop {
                match content.next().await {
                    BlobContentNext::More((content1, item)) => {
                        content = content1;
                        match item? {
                            BaoContentItem::Parent(_) => {}
                            BaoContentItem::Leaf(leaf) => {
                                data.write_bytes_at(leaf.offset, leaf.data)
                                    .await
                                    .map_err(|e| e!(DecodeError::Write, e))?;
                            }
                        }
                    }
                    BlobContentNext::Done(end) => {
                        return Ok(end);
                    }
                }
            }
        }

        /// Immediately finish the get response without reading further
        pub fn finish(self) -> AtClosing<R> {
            AtClosing::new(self.misc, self.stream.finish().into_inner(), false)
        }
    }

    /// State after we have read all the content for a blob
    #[derive(Debug)]
    pub struct AtEndBlob<R: RecvStream = DefaultReader> {
        stream: R,
        misc: Box<Misc>,
    }

    /// The next state after the end of a blob
    #[derive(Debug, From)]
    pub enum EndBlobNext<R: RecvStream = DefaultReader> {
        /// Response is expected to have more children
        MoreChildren(AtStartChild<R>),
        /// No more children expected
        Closing(AtClosing<R>),
    }

    impl<R: RecvStream> AtEndBlob<R> {
        /// Read the next child, or finish
        pub fn next(mut self) -> EndBlobNext<R> {
            if let Some((offset, ranges)) = self.misc.ranges_iter.next() {
                AtStartChild {
                    reader: self.stream,
                    offset,
                    ranges,
                    misc: self.misc,
                }
                .into()
            } else {
                AtClosing::new(self.misc, self.stream, true).into()
            }
        }
    }

    /// State when finishing the get response
    #[derive(Debug)]
    pub struct AtClosing<R: RecvStream = DefaultReader> {
        misc: Box<Misc>,
        reader: R,
        check_extra_data: bool,
    }

    impl<R: RecvStream> AtClosing<R> {
        fn new(misc: Box<Misc>, reader: R, check_extra_data: bool) -> Self {
            Self {
                misc,
                reader,
                check_extra_data,
            }
        }

        /// Finish the get response, returning statistics
        pub async fn next(self) -> result::Result<Stats, AtClosingNextError> {
            // Check that the stream has ended and there is no trailing data
            let mut reader = self.reader;
            if self.check_extra_data {
                let rest = reader.recv_bytes(1).await?;
                if !rest.is_empty() {
                    error!("Unexpected extra data at the end of the stream");
                }
            }
            Ok(Stats {
                counters: self.misc.counters,
                elapsed: self.misc.start.elapsed(),
            })
        }
    }

    /// Error that you can get from [`AtClosing::next`]
    #[stack_error(derive, add_meta, from_sources)]
    pub enum AtClosingNextError {
        /// Generic io error
        #[error(transparent)]
        Read {
            #[error(std_err)]
            source: io::Error,
        },
    }

    /// Byte counters for a single request
    #[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq)]
    pub struct RequestCounters {
        /// Payload bytes written
        pub payload_bytes_written: u64,
        /// Request, hash pair and size bytes written
        pub other_bytes_written: u64,
        /// Payload bytes read
        pub payload_bytes_read: u64,
        /// Hash pair and size bytes read
        pub other_bytes_read: u64,
    }

    /// Stuff we need to hold on to while going through the machine states
    #[derive(Debug, derive_more::Deref, derive_more::DerefMut)]
    struct Misc {
        /// start time for statistics
        start: Instant,
        /// counters
        #[deref]
        #[deref_mut]
        counters: RequestCounters,
        /// iterator over the ranges of the collection and the children
        ranges_iter: RangesIter,
    }
}