From cf03c2883a1dc21f96499b7873ef97d56b823db8 Mon Sep 17 00:00:00 2001
From: Matthias Beyer
Date: Sun, 28 Sep 2025 10:45:00 +0200
Subject: [PATCH] Remove Store: Deref

As per the Rust API guidelines [0], only smart pointers should implement
`Deref`, so this patch removes the `impl Deref for Store`. Call sites that
previously relied on the implicit deref to `blobs::Blobs` now go through
`Store::blobs()` explicitly.

[0]: https://rust-lang.github.io/api-guidelines/predictability.html?highlight=Deref#only-smart-pointers-implement-deref-and-derefmut-c-deref

Signed-off-by: Matthias Beyer
---
 src/api.rs               | 10 +----
 src/api/blobs.rs         | 91 +++++++++++++++++++++++-----------------
 src/api/downloader.rs    | 20 +++++----
 src/api/remote.rs        | 12 ++++--
 src/format/collection.rs |  5 ++-
 src/provider.rs          | 12 +++---
 src/store/fs.rs          | 70 +++++++++++++++++--------------
 src/store/fs/gc.rs       | 32 +++++++-------
 src/store/mem.rs         | 10 +++--
 src/test.rs              |  4 +-
 src/tests.rs             | 41 +++++++++---------
 11 files changed, 168 insertions(+), 139 deletions(-)

diff --git a/src/api.rs b/src/api.rs
index 117c59e25..a8a3fcc1a 100644
--- a/src/api.rs
+++ b/src/api.rs
@@ -12,7 +12,7 @@
 //!
 //! You can also [`connect`](Store::connect) to a remote store that is listening
 //! to rpc requests.
-use std::{io, net::SocketAddr, ops::Deref};
+use std::{io, net::SocketAddr};
 
 use bao_tree::io::EncodeError;
 use iroh::Endpoint;
@@ -250,14 +250,6 @@ pub struct Store {
     client: ApiClient,
 }
 
-impl Deref for Store {
-    type Target = blobs::Blobs;
-
-    fn deref(&self) -> &Self::Target {
-        blobs::Blobs::ref_from_sender(&self.client)
-    }
-}
-
 impl Store {
     /// The tags API.
     pub fn tags(&self) -> &Tags {
diff --git a/src/api/blobs.rs b/src/api/blobs.rs
index 897e0371c..35369edd0 100644
--- a/src/api/blobs.rs
+++ b/src/api/blobs.rs
@@ -4,37 +4,56 @@
 //! and exporting blobs, observing the bitfield of a blob, and deleting blobs.
 //!
 //! The main entry point is the [`Blobs`] struct.
-use std::{
-    collections::BTreeMap,
-    future::{Future, IntoFuture},
-    io,
-    num::NonZeroU64,
-    path::{Path, PathBuf},
-    pin::Pin,
-};
-
+use std::collections::BTreeMap;
+use std::future::Future;
+use std::future::IntoFuture;
+use std::io;
+use std::num::NonZeroU64;
+use std::path::Path;
+use std::path::PathBuf;
+use std::pin::Pin;
+
+use bao_tree::io::fsm::ResponseDecoder;
+use bao_tree::io::fsm::ResponseDecoderNext;
 pub use bao_tree::io::mixed::EncodedItem;
-use bao_tree::{
-    io::{
-        fsm::{ResponseDecoder, ResponseDecoderNext},
-        BaoContentItem, Leaf,
-    },
-    BaoTree, ChunkNum, ChunkRanges,
-};
+use bao_tree::io::BaoContentItem;
+use bao_tree::io::Leaf;
+use bao_tree::BaoTree;
+use bao_tree::ChunkNum;
+use bao_tree::ChunkRanges;
 use bytes::Bytes;
 use genawaiter::sync::Gen;
-use iroh_io::{AsyncStreamReader, TokioStreamReader};
-use irpc::channel::{mpsc, oneshot};
-use n0_future::{future, stream, Stream, StreamExt};
+use iroh_io::AsyncStreamReader;
+use iroh_io::TokioStreamReader;
+use irpc::channel::mpsc;
+use irpc::channel::oneshot;
+use n0_future::future;
+use n0_future::stream;
+use n0_future::Stream;
+use n0_future::StreamExt;
 use quinn::SendStream;
-use range_collections::{range_set::RangeSetRange, RangeSet2};
+use range_collections::range_set::RangeSetRange;
+use range_collections::RangeSet2;
 use ref_cast::RefCast;
-use serde::{Deserialize, Serialize};
+use serde::Deserialize;
+use serde::Serialize;
 use tokio::io::AsyncWriteExt;
 use tracing::trace;
 
 mod reader;
 pub use reader::BlobReader;
+use super::proto::BatchResponse;
+use super::proto::BlobStatusRequest;
+use super::proto::ClearProtectedRequest;
+use super::proto::CreateTempTagRequest;
+use super::proto::ExportBaoRequest;
+use super::proto::ExportRangesItem;
+use super::proto::ImportBaoRequest;
+use super::proto::ImportByteStreamRequest;
+use super::proto::ImportBytesRequest;
+use super::proto::ImportPathRequest;
+use super::proto::ListRequest;
+use super::proto::Scope;
 // Public reexports from the proto module.
 //
 // Due to the fact that the proto module is hidden from docs by default,
@@ -45,23 +64,19 @@ pub use super::proto::{
     ExportProgressItem, ExportRangesRequest as ExportRangesOptions,
     ImportBaoRequest as ImportBaoOptions, ImportMode, ObserveRequest as ObserveOptions,
 };
-use super::{
-    proto::{
-        BatchResponse, BlobStatusRequest, ClearProtectedRequest, CreateTempTagRequest,
-        ExportBaoRequest, ExportRangesItem, ImportBaoRequest, ImportByteStreamRequest,
-        ImportBytesRequest, ImportPathRequest, ListRequest, Scope,
-    },
-    remote::HashSeqChunk,
-    tags::TagInfo,
-    ApiClient, RequestResult, Tags,
-};
-use crate::{
-    api::proto::{BatchRequest, ImportByteStreamUpdate},
-    provider::events::ClientResult,
-    store::IROH_BLOCK_SIZE,
-    util::temp_tag::TempTag,
-    BlobFormat, Hash, HashAndFormat,
-};
+use super::remote::HashSeqChunk;
+use super::tags::TagInfo;
+use super::ApiClient;
+use super::RequestResult;
+use super::Tags;
+use crate::api::proto::BatchRequest;
+use crate::api::proto::ImportByteStreamUpdate;
+use crate::provider::events::ClientResult;
+use crate::store::IROH_BLOCK_SIZE;
+use crate::util::temp_tag::TempTag;
+use crate::BlobFormat;
+use crate::Hash;
+use crate::HashAndFormat;
 
 /// Options for adding bytes.
 #[derive(Debug)]
diff --git a/src/api/downloader.rs b/src/api/downloader.rs
index bf78bf793..911ace228 100644
--- a/src/api/downloader.rs
+++ b/src/api/downloader.rs
@@ -381,7 +381,7 @@ async fn split_request<'a>(
         };
         let first = GetRequest::blob(req.hash);
         execute_get(pool, Arc::new(first), providers, store, progress).await?;
-        let size = store.observe(req.hash).await?.size();
+        let size = store.blobs().observe(req.hash).await?.size();
         anyhow::ensure!(size % 32 == 0, "Size is not a multiple of 32");
         let n = size / 32;
         Box::new(
@@ -547,8 +547,8 @@ mod tests {
         let (r1, store1, _) = node_test_setup_fs(testdir.path().join("a")).await?;
         let (r2, store2, _) = node_test_setup_fs(testdir.path().join("b")).await?;
        let (r3, store3, _) = node_test_setup_fs(testdir.path().join("c")).await?;
-        let tt1 = store1.add_slice("hello world").await?;
-        let tt2 = store2.add_slice("hello world 2").await?;
+        let tt1 = store1.blobs().add_slice("hello world").await?;
+        let tt2 = store2.blobs().add_slice("hello world 2").await?;
         let node1_addr = r1.endpoint().node_addr().initialized().await;
         let node1_id = node1_addr.node_id;
         let node2_addr = r2.endpoint().node_addr().initialized().await;
@@ -567,8 +567,8 @@ mod tests {
         while let Some(item) = progress.next().await {
             println!("Got item: {item:?}");
         }
-        assert_eq!(store3.get_bytes(tt1.hash).await?.deref(), b"hello world");
-        assert_eq!(store3.get_bytes(tt2.hash).await?.deref(), b"hello world 2");
+        assert_eq!(store3.blobs().get_bytes(tt1.hash).await?.deref(), b"hello world");
+        assert_eq!(store3.blobs().get_bytes(tt2.hash).await?.deref(), b"hello world 2");
         Ok(())
     }
 
@@ -579,10 +579,11 @@ mod tests {
         let (r1, store1, _) = node_test_setup_fs(testdir.path().join("a")).await?;
         let (r2, store2, _) = node_test_setup_fs(testdir.path().join("b")).await?;
         let (r3, store3, _) = node_test_setup_fs(testdir.path().join("c")).await?;
-        let tt1 = store1.add_slice(vec![1; 10000000]).await?;
-        let tt2 = store2.add_slice(vec![2; 10000000]).await?;
+        let tt1 = store1.blobs().add_slice(vec![1; 10000000]).await?;
+        let tt2 = store2.blobs().add_slice(vec![2; 10000000]).await?;
         let hs = [tt1.hash, tt2.hash].into_iter().collect::<HashSeq>();
         let root = store1
+            .blobs()
             .add_bytes_with_opts(AddBytesOptions {
                 data: hs.clone().into(),
                 format: crate::BlobFormat::HashSeq,
@@ -648,10 +649,11 @@ mod tests {
         let (r1, store1, _) = node_test_setup_fs(testdir.path().join("a")).await?;
         let (r2, store2, _) = node_test_setup_fs(testdir.path().join("b")).await?;
         let (r3, store3, _) = node_test_setup_fs(testdir.path().join("c")).await?;
-        let tt1 = store1.add_slice(vec![1; 10000000]).await?;
-        let tt2 = store2.add_slice(vec![2; 10000000]).await?;
+        let tt1 = store1.blobs().add_slice(vec![1; 10000000]).await?;
+        let tt2 = store2.blobs().add_slice(vec![2; 10000000]).await?;
         let hs = [tt1.hash, tt2.hash].into_iter().collect::<HashSeq>();
         let root = store1
+            .blobs()
             .add_bytes_with_opts(AddBytesOptions {
                 data: hs.clone().into(),
                 format: crate::BlobFormat::HashSeq,
diff --git a/src/api/remote.rs b/src/api/remote.rs
index dcfbc4fb4..9ed7faa29 100644
--- a/src/api/remote.rs
+++ b/src/api/remote.rs
@@ -416,13 +416,13 @@ impl Remote {
     ) -> anyhow::Result<LocalInfo> {
         let request = request.into();
         let root = request.hash;
-        let bitfield = self.store().observe(root).await?;
+        let bitfield = self.store().blobs().observe(root).await?;
         let children = if !request.ranges.is_blob() {
             let opts = ExportBaoOptions {
                 hash: root,
                 ranges: bitfield.ranges.clone(),
             };
-            let bao = self.store().export_bao_with_opts(opts, 32);
+            let bao = self.store().blobs().export_bao_with_opts(opts, 32);
             let mut by_index = BTreeMap::new();
             let mut stream = bao.hashes_with_index();
             while let Some(item) = stream.next().await {
@@ -447,7 +447,7 @@ impl Remote {
                 // we don't have the hash, so we can't store the bitfield
                 continue;
             };
-            let bitfield = self.store().observe(*hash).await?;
+            let bitfield = self.store().blobs().observe(*hash).await?;
             bitfields.insert(*hash, bitfield);
             hash_seq.insert(child, *hash);
         }
@@ -592,6 +592,7 @@ impl Remote {
         let root_ranges = request_ranges.next().expect("infinite iterator");
         if !root_ranges.is_empty() {
             self.store()
+                .blobs()
                 .export_bao(root, root_ranges.clone())
                 .write_quinn_with_progress(&mut send, &mut context, &root, 0)
                 .await?;
@@ -601,13 +602,14 @@ impl Remote {
             send.finish()?;
             return Ok(Default::default());
         }
-        let hash_seq = self.store().get_bytes(root).await?;
+        let hash_seq = self.store().blobs().get_bytes(root).await?;
         let hash_seq = HashSeq::try_from(hash_seq)?;
         for (child, (child_hash, child_ranges)) in
             hash_seq.into_iter().zip(request_ranges).enumerate()
         {
             if !child_ranges.is_empty() {
                 self.store()
+                    .blobs()
                     .export_bao(child_hash, child_ranges.clone())
                     .write_quinn_with_progress(
                         &mut send,
@@ -681,6 +683,7 @@ impl Remote {
         let mut next_child = Ok(at_start_child);
         let hash_seq = HashSeq::try_from(
             store
+                .blobs()
                 .get_bytes(root)
                 .await
                 .map_err(|e| LocalFailureSnafu.into_error(e.into()))?,
@@ -891,6 +894,7 @@ async fn get_blob_ranges_impl(
     let buffer_size = get_buffer_size(size);
     trace!(%size, %buffer_size, "get blob");
     let handle = store
+        .blobs()
         .import_bao(hash, size, buffer_size)
         .await
         .map_err(|e| LocalFailureSnafu.into_error(e.into()))?;
diff --git a/src/format/collection.rs b/src/format/collection.rs
index 9716faf86..f0d9dbf0b 100644
--- a/src/format/collection.rs
+++ b/src/format/collection.rs
@@ -71,7 +71,7 @@ pub trait SimpleStore {
 
 impl SimpleStore for crate::api::Store {
     async fn load(&self, hash: Hash) -> anyhow::Result<Bytes> {
-        Ok(self.get_bytes(hash).await?)
+        Ok(self.blobs().get_bytes(hash).await?)
     }
 }
 
@@ -190,11 +190,12 @@ impl Collection {
     pub async fn store(self, db: &Store) -> anyhow::Result<TempTag> {
         let (links, meta) = self.into_parts();
         let meta_bytes = postcard::to_stdvec(&meta)?;
-        let meta_tag = db.add_bytes(meta_bytes).temp_tag().await?;
+        let meta_tag = db.blobs().add_bytes(meta_bytes).temp_tag().await?;
         let links_bytes = std::iter::once(*meta_tag.hash())
             .chain(links)
             .collect::<HashSeq>();
         let links_tag = db
+            .blobs()
             .add_bytes_with_opts(AddBytesOptions {
                 data: links_bytes.into(),
                 format: BlobFormat::HashSeq,
diff --git a/src/provider.rs b/src/provider.rs
index ba415df41..edbf84c53 100644
--- a/src/provider.rs
+++ b/src/provider.rs
@@ -411,7 +411,7 @@ pub async fn handle_get(
             let hash_seq = match &hash_seq {
                 Some(b) => b,
                 None => {
-                    let bytes = store.get_bytes(hash).await?;
+                    let bytes = store.blobs().get_bytes(hash).await?;
                     let hs = HashSeq::try_from(bytes)?;
                     hash_seq = Some(hs);
                     hash_seq.as_ref().unwrap()
@@ -460,7 +460,7 @@ pub async fn handle_push(
     let root_ranges = request_ranges.next().expect("infinite iterator");
     if !root_ranges.is_empty() {
         // todo: send progress from import_bao_quinn or rename to import_bao_quinn_with_progress
-        store
+        store.blobs()
            .import_bao_quinn(hash, root_ranges.clone(), &mut reader.inner)
             .await?;
     }
@@ -469,13 +469,13 @@
         return Ok(());
     }
     // todo: we assume here that the hash sequence is complete. For some requests this might not be the case. We would need `LazyHashSeq` for that, but it is buggy as of now!
-    let hash_seq = store.get_bytes(hash).await?;
+    let hash_seq = store.blobs().get_bytes(hash).await?;
     let hash_seq = HashSeq::try_from(hash_seq)?;
     for (child_hash, child_ranges) in hash_seq.into_iter().zip(request_ranges) {
         if child_ranges.is_empty() {
             continue;
         }
-        store
+        store.blobs()
             .import_bao_quinn(child_hash, child_ranges.clone(), &mut reader.inner)
             .await?;
     }
@@ -490,7 +490,7 @@ pub(crate) async fn send_blob(
     ranges: ChunkRanges,
     writer: &mut ProgressWriter,
 ) -> ExportBaoResult<()> {
-    store
+    store.blobs()
         .export_bao(hash, ranges)
         .write_quinn_with_progress(&mut writer.inner, &mut writer.context, &hash, index)
         .await
@@ -504,7 +504,7 @@ pub async fn handle_observe(
     request: ObserveRequest,
     writer: &mut ProgressWriter,
 ) -> Result<()> {
-    let mut stream = store.observe(request.hash).stream().await?;
+    let mut stream = store.blobs().observe(request.hash).stream().await?;
     let mut old = stream
         .next()
         .await
diff --git a/src/store/fs.rs b/src/store/fs.rs
index 48946abd6..fc78fdb24 100644
--- a/src/store/fs.rs
+++ b/src/store/fs.rs
@@ -1575,12 +1575,12 @@ pub mod tests {
             let data = test_data(size);
             let ranges = ChunkRanges::all();
             let (hash, bao) = create_n0_bao(&data, &ranges)?;
-            let obs = store.observe(hash);
+            let obs = store.blobs().observe(hash);
             let task = tokio::spawn(async move {
                 obs.await_completion().await?;
                 api::Result::Ok(())
             });
-            store.import_bao_bytes(hash, ranges, bao).await?;
+            store.blobs().import_bao_bytes(hash, ranges, bao).await?;
             task.await??;
         }
         Ok(())
@@ -1615,12 +1615,12 @@ pub mod tests {
             let expected = test_data(size);
             let expected_hash = Hash::new(&expected);
             let stream = bytes_to_stream(expected.clone(), 1023);
-            let obs = store.observe(expected_hash);
-            let tt = store.add_stream(stream).await.temp_tag().await?;
+            let obs = store.blobs().observe(expected_hash);
+            let tt = store.blobs().add_stream(stream).await.temp_tag().await?;
             assert_eq!(expected_hash, *tt.hash());
             // we must at some point see completion, otherwise the test will hang
             obs.await_completion().await?;
-            let actual = store.get_bytes(expected_hash).await?;
+            let actual = store.blobs().get_bytes(expected_hash).await?;
             // check that the data is there
             assert_eq!(&expected, &actual);
         }
@@ -1639,12 +1639,12 @@ pub mod tests {
         for size in sizes {
             let expected = test_data(size);
             let expected_hash = Hash::new(&expected);
-            let obs = store.observe(expected_hash);
-            let tt = store.add_bytes(expected.clone()).await?;
+            let obs = store.blobs().observe(expected_hash);
+            let tt = store.blobs().add_bytes(expected.clone()).await?;
             assert_eq!(expected_hash, tt.hash);
             // we must at some point see completion, otherwise the test will hang
             obs.await_completion().await?;
-            let actual = store.get_bytes(expected_hash).await?;
+            let actual = store.blobs().get_bytes(expected_hash).await?;
             // check that the data is there
             assert_eq!(&expected, &actual);
         }
@@ -1667,10 +1667,10 @@ pub mod tests {
         {
             let expected = test_data(size);
             let expected_hash = Hash::new(&expected);
-            let obs = store.observe(expected_hash);
-            let tt = store.add_bytes(expected.clone()).await?;
+            let obs = store.blobs().observe(expected_hash);
+            let tt = store.blobs().add_bytes(expected.clone()).await?;
             assert_eq!(expected_hash, tt.hash);
-            let actual = store.get_bytes(expected_hash).await?;
+            let actual = store.blobs().get_bytes(expected_hash).await?;
             // check that the data is there
             assert_eq!(&expected, &actual);
             assert_eq!(
@@ -1699,12 +1699,12 @@ pub mod tests {
             let expected_hash = Hash::new(&expected);
             let path = testdir.path().join(format!("in-{size}"));
             fs::write(&path, &expected)?;
-            let obs = store.observe(expected_hash);
-            let tt = store.add_path(&path).await?;
+            let obs = store.blobs().observe(expected_hash);
+            let tt = store.blobs().add_path(&path).await?;
             assert_eq!(expected_hash, tt.hash);
             // we must at some point see completion, otherwise the test will hang
             obs.await_completion().await?;
-            let actual = store.get_bytes(expected_hash).await?;
+            let actual = store.blobs().get_bytes(expected_hash).await?;
             // check that the data is there
             assert_eq!(&expected, &actual, "size={size}");
         }
@@ -1722,10 +1722,10 @@ pub mod tests {
         for size in INTERESTING_SIZES {
             let expected = test_data(size);
             let expected_hash = Hash::new(&expected);
-            let tt = store.add_bytes(expected.clone()).await?;
+            let tt = store.blobs().add_bytes(expected.clone()).await?;
             assert_eq!(expected_hash, tt.hash);
             let out_path = testdir.path().join(format!("out-{size}"));
-            store.export(expected_hash, &out_path).await?;
+            store.blobs().export(expected_hash, &out_path).await?;
             let actual = fs::read(&out_path)?;
             assert_eq!(expected, actual);
         }
@@ -1743,12 +1743,13 @@ pub mod tests {
            let ranges = ChunkRanges::chunks(16..32);
            let (hash, bao) = create_n0_bao(&data, &ranges)?;
            store
+                .blobs()
                .import_bao_bytes(hash, ranges.clone(), bao.clone())
                .await?;
-            let bitfield = store.observe(hash).await?;
+            let bitfield = store.blobs().observe(hash).await?;
             assert_eq!(bitfield.ranges, ranges);
             assert_eq!(bitfield.size(), data.len() as u64);
-            let export = store.export_bao(hash, ranges).bao_to_vec().await?;
+            let export = store.blobs().export_bao(hash, ranges).bao_to_vec().await?;
             assert_eq!(export, bao);
         }
         Ok(())
@@ -1767,6 +1768,7 @@ pub mod tests {
             let (hash, encoded) = create_n0_bao(&data, &ChunkRanges::all())?;
             let data = Bytes::from(encoded);
             store
+                .blobs()
                 .import_bao_bytes(hash, ChunkRanges::all(), data)
                 .await?;
         }
@@ -1789,6 +1791,7 @@ pub mod tests {
             let data = Bytes::from(encoded);
             trace!("importing size={}", size);
             store
+                .blobs()
                 .import_bao_bytes(hash, ChunkRanges::all(), data)
                 .await?;
         }
@@ -1810,6 +1813,7 @@ pub mod tests {
             let (hash, encoded) = create_n0_bao(&data, &ChunkRanges::all())?;
             let data = Bytes::from(encoded);
             store
+                .blobs()
                 .import_bao_bytes(hash, ChunkRanges::all(), data)
                 .await?;
         }
@@ -1821,6 +1825,7 @@ pub mod tests {
             let expected = vec![0u8; size];
             let hash = Hash::new(&expected);
             let actual = store
+                .blobs()
                 .export_bao(hash, ChunkRanges::all())
                 .data_to_vec()
                 .await?;
@@ -1844,7 +1849,7 @@ pub mod tests {
             let data = test_data(size);
             let (hash, ranges, encoded) = create_n0_bao_full(&data, &just_size)?;
             let data = Bytes::from(encoded);
-            if let Err(cause) = store.import_bao_bytes(hash, ranges, data).await {
+            if let Err(cause) = store.blobs().import_bao_bytes(hash, ranges, data).await {
                 panic!("failed to import size={size}: {cause}");
             }
         }
@@ -1857,7 +1862,7 @@ pub mod tests {
         for size in sizes {
             let data = test_data(size);
             let (hash, ranges, expected) = create_n0_bao_full(&data, &just_size)?;
-            let actual = match store.export_bao(hash, ranges).bao_to_vec().await {
+            let actual = match store.blobs().export_bao(hash, ranges).bao_to_vec().await {
                 Ok(actual) => actual,
                 Err(cause) => panic!("failed to export size={size}: {cause}"),
             };
@@ -1883,7 +1888,7 @@ pub mod tests {
             let data = test_data(size);
             let (hash, ranges, encoded) = create_n0_bao_full(&data, &just_size)?;
             let data = Bytes::from(encoded);
-            if let Err(cause) = store.import_bao_bytes(hash, ranges, data).await {
+            if let Err(cause) = store.blobs().import_bao_bytes(hash, ranges, data).await {
                 panic!("failed to import size={size}: {cause}");
             }
         }
@@ -1902,7 +1907,7 @@ pub mod tests {
             let data = test_data(size);
             let (hash, ranges, encoded) = create_n0_bao_full(&data, &remaining)?;
             let data = Bytes::from(encoded);
-            if let Err(cause) = store.import_bao_bytes(hash, ranges, data).await {
+            if let Err(cause) = store.blobs().import_bao_bytes(hash, ranges, data).await {
                 panic!("failed to import size={size}: {cause}");
             }
         }
@@ -1916,7 +1921,7 @@ pub mod tests {
         for size in sizes {
             let data = test_data(size);
             let (hash, ranges, expected) = create_n0_bao_full(&data, &ChunkRanges::all())?;
-            let actual = match store.export_bao(hash, ranges).bao_to_vec().await {
+            let actual = match store.blobs().export_bao(hash, ranges).bao_to_vec().await {
                 Ok(actual) => actual,
                 Err(cause) => panic!("failed to export size={size}: {cause}"),
             };
@@ -1947,7 +1952,7 @@ pub mod tests {
             let data = test_data(size);
             let (hash, ranges, encoded) = create_n0_bao_full(&data, &just_size)?;
             let data = Bytes::from(encoded);
-            if let Err(cause) = store.import_bao_bytes(hash, ranges, data).await {
+            if let Err(cause) = store.blobs().import_bao_bytes(hash, ranges, data).await {
                 panic!("failed to import size={size}: {cause}");
             }
         }
@@ -1962,7 +1967,7 @@ pub mod tests {
             let expected_ranges = round_up_request(size as u64, &just_size);
             let data = test_data(size);
             let hash = Hash::new(&data);
-            let bitfield = store.observe(hash).await?;
+            let bitfield = store.blobs().observe(hash).await?;
             assert_eq!(bitfield.ranges, expected_ranges);
         }
         store.dump().await?;
@@ -1986,7 +1991,7 @@ pub mod tests {
             let data = test_data(size);
             let (hash, ranges, encoded) = create_n0_bao_full(&data, &just_size)?;
             let data = Bytes::from(encoded);
-            if let Err(cause) = store.import_bao_bytes(hash, ranges, data).await {
+            if let Err(cause) = store.blobs().import_bao_bytes(hash, ranges, data).await {
                 panic!("failed to import size={size}: {cause}");
             }
         }
@@ -2002,7 +2007,7 @@ pub mod tests {
             let expected_ranges = round_up_request(size as u64, &just_size);
             let data = test_data(size);
             let hash = Hash::new(&data);
-            let bitfield = store.observe(hash).await?;
+            let bitfield = store.blobs().observe(hash).await?;
             assert_eq!(bitfield.ranges, expected_ranges, "size={size}");
         }
         store.dump().await?;
@@ -2023,7 +2028,7 @@ pub mod tests {
         for size in sizes {
             let data = test_data(size);
             let data = data;
-            tts.push(store.add_bytes(data.clone()).await?);
+            tts.push(store.blobs().add_bytes(data.clone()).await?);
         }
         store.dump().await?;
         store.shutdown().await?;
@@ -2035,6 +2040,7 @@ pub mod tests {
             let expected = test_data(size);
             let hash = Hash::new(&expected);
             let Ok(actual) = store
+                .blobs()
                 .export_bao(hash, ChunkRanges::all())
                 .data_to_vec()
                 .await
@@ -2103,7 +2109,7 @@ pub mod tests {
         for size in sizes {
             let data = vec![0u8; size];
             let data = Bytes::from(data);
-            let tt = store.add_bytes(data.clone()).temp_tag().await?;
+            let tt = store.blobs().add_bytes(data.clone()).temp_tag().await?;
             data_by_hash.insert(*tt.hash(), data);
             hashes.push(tt);
         }
@@ -2111,17 +2117,19 @@ pub mod tests {
         for tt in &hashes {
             let hash = *tt.hash();
             let path = testdir.path().join(format!("{hash}.txt"));
-            store.export(hash, path).await?;
+            store.blobs().export(hash, path).await?;
         }
         for tt in &hashes {
             let hash = tt.hash();
             let data = store
+                .blobs()
                 .export_bao(*hash, ChunkRanges::all())
                 .data_to_vec()
                 .await
                 .unwrap();
             assert_eq!(data, data_by_hash[hash].to_vec());
             let bao = store
+                .blobs()
                 .export_bao(*hash, ChunkRanges::all())
                 .bao_to_vec()
                 .await
@@ -2134,7 +2142,7 @@ pub mod tests {
             let data = test_data(size);
             let ranges = ChunkRanges::all();
             let (hash, bao) = create_n0_bao(&data, &ranges)?;
-            store.import_bao_bytes(hash, ranges, bao).await?;
+            store.blobs().import_bao_bytes(hash, ranges, bao).await?;
         }
 
         for (_hash, _bao_tree) in bao_by_hash {
diff --git a/src/store/fs/gc.rs b/src/store/fs/gc.rs
index da7836e76..565ed9f9a 100644
--- a/src/store/fs/gc.rs
+++ b/src/store/fs/gc.rs
@@ -63,7 +63,7 @@ pub(super) async fn gc_mark_task(
     for HashAndFormat { hash, format } in roots {
         // we need to do this for all formats except raw
         if live.insert(hash) && !format.is_raw() {
-            let mut stream = store.export_bao(hash, ChunkRanges::all()).hashes();
+            let mut stream = store.blobs().export_bao(hash, ChunkRanges::all()).hashes();
             while let Some(hash) = stream.next().await {
                 match hash {
                     Ok(hash) => {
@@ -176,7 +176,7 @@ pub type ProtectCb = Arc<
 pub async fn gc_run_once(store: &Store, live: &mut HashSet<Hash>) -> crate::api::Result<()> {
     debug!(externally_protected = live.len(), "gc: start");
     {
-        store.clear_protected().await?;
+        store.blobs().clear_protected().await?;
         let mut stream = gc_mark(store, live);
         while let Some(ev) = stream.next().await {
             match ev {
@@ -296,23 +296,23 @@ mod tests {
         gc_run_once(store, &mut live).await?;
         // a is protected because we keep the temp tag
         assert!(live.contains(&a));
-        assert!(store.has(a).await?);
+        assert!(store.blobs().has(a).await?);
         // b is not protected because we drop the temp tag
         assert!(!live.contains(&b));
-        assert!(!store.has(b).await?);
+        assert!(!store.blobs().has(b).await?);
         // c is protected because we set an explicit tag
         assert!(live.contains(&c));
-        assert!(store.has(c).await?);
+        assert!(store.blobs().has(c).await?);
         // d and e are protected because they are part of a hashseq protected by a temp tag
         assert!(live.contains(&d));
-        assert!(store.has(d).await?);
+        assert!(store.blobs().has(d).await?);
         assert!(live.contains(&e));
-        assert!(store.has(e).await?);
+        assert!(store.blobs().has(e).await?);
         // f and g are protected because they are part of a hashseq protected by a tag
         assert!(live.contains(&f));
-        assert!(store.has(f).await?);
+        assert!(store.blobs().has(f).await?);
         assert!(live.contains(&g));
-        assert!(store.has(g).await?);
+        assert!(store.blobs().has(g).await?);
         drop(at);
         drop(hehs);
         Ok(())
@@ -333,7 +333,7 @@ mod tests {
         let outboard_path = options.outboard_path(ah);
         assert!(data_path.exists());
         assert!(outboard_path.exists());
-        assert!(store.has(*ah).await?);
+        assert!(store.blobs().has(*ah).await?);
         drop(a);
         gc_run_once(store, &mut live).await?;
         assert!(!data_path.exists());
@@ -346,7 +346,7 @@ mod tests {
         let data = vec![1u8; 8000000];
         let ranges = ChunkRanges::from(..ChunkNum(19));
         let (bh, b_bao) = create_n0_bao(&data, &ranges)?;
-        store.import_bao_bytes(bh, ranges, b_bao).await?;
+        store.blobs().import_bao_bytes(bh, ranges, b_bao).await?;
         let data_path = options.data_path(&bh);
         let outboard_path = options.outboard_path(&bh);
         let sizes_path = options.sizes_path(&bh);
@@ -401,15 +401,15 @@ mod tests {
     }
 
     async fn gc_check_deletion(store: &Store) -> TestResult {
-        let temp_tag = store.add_bytes(b"foo".to_vec()).temp_tag().await?;
+        let temp_tag = store.blobs().add_bytes(b"foo".to_vec()).temp_tag().await?;
         let hash = *temp_tag.hash();
-        assert_eq!(store.get_bytes(hash).await?.as_ref(), b"foo");
+        assert_eq!(store.blobs().get_bytes(hash).await?.as_ref(), b"foo");
         drop(temp_tag);
         let mut live = HashSet::new();
         gc_run_once(store, &mut live).await?;
         // check that `get_bytes` returns an error.
-        let res = store.get_bytes(hash).await;
+        let res = store.blobs().get_bytes(hash).await;
         assert!(res.is_err());
         assert!(matches!(
             res,
@@ -421,6 +421,7 @@ mod tests {
 
         // check that `export_ranges` returns an error.
         let res = store
+            .blobs()
             .export_ranges(hash, RangeSet2::all())
             .concatenate()
             .await;
@@ -435,6 +436,7 @@ mod tests {
 
         // check that `export_bao` returns an error.
         let res = store
+            .blobs()
             .export_bao(hash, ChunkRanges::all())
             .bao_to_vec()
             .await;
@@ -451,7 +453,7 @@ mod tests {
         // check that `export` returns an error.
         let target = tempfile::NamedTempFile::new()?;
         let path = target.path();
-        let res = store.export(hash, path).await;
+        let res = store.blobs().export(hash, path).await;
         assert!(res.is_err());
         assert!(matches!(
             res,
diff --git a/src/store/mem.rs b/src/store/mem.rs
index e5529e7fa..1880e7124 100644
--- a/src/store/mem.rs
+++ b/src/store/mem.rs
@@ -1071,28 +1071,30 @@ mod tests {
     #[tokio::test]
     async fn smoke() -> TestResult<()> {
         let store = MemStore::new();
-        let tt = store.add_bytes(vec![0u8; 1024 * 64]).temp_tag().await?;
+        let tt = store.blobs().add_bytes(vec![0u8; 1024 * 64]).temp_tag().await?;
         let hash = *tt.hash();
         println!("hash: {hash:?}");
-        let mut stream = store.export_bao(hash, ChunkRanges::all()).stream();
+        let mut stream = store.blobs().export_bao(hash, ChunkRanges::all()).stream();
         while let Some(item) = stream.next().await {
             println!("item: {item:?}");
         }
-        let stream = store.export_bao(hash, ChunkRanges::all());
+        let stream = store.blobs().export_bao(hash, ChunkRanges::all());
         let exported = stream.bao_to_vec().await?;
 
         let store2 = MemStore::new();
-        let mut or = store2.observe(hash).stream().await?;
+        let mut or = store2.blobs().observe(hash).stream().await?;
         tokio::spawn(async move {
             while let Some(event) = or.next().await {
                 println!("event: {event:?}");
             }
         });
         store2
+            .blobs()
             .import_bao_bytes(hash, ChunkRanges::all(), exported.clone())
             .await?;
 
         let exported2 = store2
+            .blobs()
             .export_bao(hash, ChunkRanges::all())
             .bao_to_vec()
             .await?;
diff --git a/src/test.rs b/src/test.rs
index c0760a088..326599099 100644
--- a/src/test.rs
+++ b/src/test.rs
@@ -25,7 +25,7 @@ pub async fn create_random_blobs(
             let mut rand = rand::rngs::StdRng::seed_from_u64(seed);
             let mut data = vec![0u8; size];
             rand.fill_bytes(&mut data);
-            store.add_bytes(data).into_future()
+            store.blobs().add_bytes(data).into_future()
         })
         .collect::<Vec<_>>()
         .await;
@@ -49,7 +49,7 @@ pub async fn add_hash_sequences(
                 tags[j].hash
             })
             .collect::<HashSeq>();
-        store
+        store.blobs()
             .add_bytes_with_opts(AddBytesOptions {
                 data: hs.into(),
                 format: BlobFormat::HashSeq,
diff --git a/src/tests.rs b/src/tests.rs
index 09b2e5b33..6084a9457 100644
--- a/src/tests.rs
+++ b/src/tests.rs
@@ -224,14 +224,14 @@ async fn two_nodes_get_blobs(
     let sizes = INTERESTING_SIZES;
     let mut tts = Vec::new();
     for size in sizes {
-        tts.push(store1.add_bytes(test_data(size)).await?);
+        tts.push(store1.blobs().add_bytes(test_data(size)).await?);
     }
     let addr1 = r1.endpoint().node_addr().initialized().await;
     let conn = r2.endpoint().connect(addr1, crate::ALPN).await?;
     for size in sizes {
         let hash = Hash::new(test_data(size));
         store2.remote().fetch(conn.clone(), hash).await?;
-        let actual = store2.get_bytes(hash).await?;
+        let actual = store2.blobs().get_bytes(hash).await?;
         assert_eq!(actual, test_data(size));
     }
     tokio::try_join!(r1.shutdown(), r2.shutdown())?;
@@ -275,6 +275,7 @@ async fn two_nodes_observe(
             io::Result::Ok(())
         });
         store1
+            .blobs()
             .import_bao_bytes(hash, ChunkRanges::all(), bao)
             .await?;
         remote_observe_task.await??;
@@ -305,7 +306,7 @@ async fn two_nodes_get_many(
     let sizes = INTERESTING_SIZES;
     let mut tts = Vec::new();
     for size in sizes {
-        tts.push(store1.add_bytes(test_data(size)).await?);
+        tts.push(store1.blobs().add_bytes(test_data(size)).await?);
     }
     let hashes = tts.iter().map(|tt| tt.hash).collect::<Vec<_>>();
     let addr1 = r1.endpoint().node_addr().initialized().await;
@@ -317,7 +318,7 @@ async fn two_nodes_get_many(
     for size in sizes {
         let expected = test_data(size);
         let hash = Hash::new(&expected);
-        let actual = store2.get_bytes(hash).await?;
+        let actual = store2.blobs().get_bytes(hash).await?;
         assert_eq!(actual, expected);
     }
     tokio::try_join!(r1.shutdown(), r2.shutdown())?;
@@ -383,7 +384,7 @@ async fn two_nodes_push_blobs(
     let sizes = INTERESTING_SIZES;
     let mut tts = Vec::new();
     for size in sizes {
-        tts.push(store1.add_bytes(test_data(size)).await?);
+        tts.push(store1.blobs().add_bytes(test_data(size)).await?);
     }
     let addr2 = r2.endpoint().node_addr().initialized().await;
     let conn = r1.endpoint().connect(addr2, crate::ALPN).await?;
@@ -399,7 +400,7 @@ async fn two_nodes_push_blobs(
             )
             .await?;
         count_rx.changed().await?;
-        let actual = store2.get_bytes(hash).await?;
+        let actual = store2.blobs().get_bytes(hash).await?;
         assert_eq!(actual, test_data(size));
     }
     tokio::try_join!(r1.shutdown(), r2.shutdown())?;
@@ -427,10 +428,10 @@ async fn two_nodes_push_blobs_mem() -> TestResult<()> {
 }
 
 pub async fn add_test_hash_seq(
-    blobs: &Store,
+    store: &Store,
     sizes: impl IntoIterator<Item = usize>,
 ) -> TestResult<HashAndFormat> {
-    let batch = blobs.batch().await?;
+    let batch = store.blobs().batch().await?;
     let mut tts = Vec::new();
     for size in sizes {
         tts.push(batch.add_bytes(test_data(size)).await?);
     }
@@ -444,11 +445,11 @@ pub async fn add_test_hash_seq_incomplete(
-    blobs: &Store,
+    store: &Store,
     sizes: impl IntoIterator<Item = usize>,
     present: impl Fn(usize) -> ChunkRanges,
 ) -> TestResult<HashAndFormat> {
-    let batch = blobs.batch().await?;
+    let batch = store.blobs().batch().await?;
     let mut tts = Vec::new();
     for (i, size) in sizes.into_iter().enumerate() {
         let data = test_data(size);
@@ -458,7 +459,7 @@ pub async fn add_test_hash_seq_incomplete(
         // why isn't import_bao_bytes returning a temp tag anyway?
         tts.push(batch.temp_tag(hash).await?);
         if !ranges.is_empty() {
-            blobs.import_bao_bytes(hash, ranges, bao).await?;
+            store.blobs().import_bao_bytes(hash, ranges, bao).await?;
         }
     }
     let hash_seq = tts.iter().map(|tt| *tt.hash()).collect::<HashSeq>();
@@ -466,8 +467,8 @@
     let ranges = present(0);
     let (root, bao) = create_n0_bao(&hash_seq_bytes, &ranges)?;
     let content = HashAndFormat::hash_seq(root);
-    blobs.tags().create(content).await?;
-    blobs.import_bao_bytes(root, ranges, bao).await?;
+    store.tags().create(content).await?;
+    store.blobs().import_bao_bytes(root, ranges, bao).await?;
     Ok(content)
 }
 
@@ -476,6 +477,7 @@ async fn check_presence(store: &Store, sizes: &[usize]) -> TestResult<()> {
         let expected = test_data(*size);
         let hash = Hash::new(&expected);
         let actual = store
+            .blobs()
            .export_bao(hash, ChunkRanges::all())
            .data_to_bytes()
            .await?;
@@ -598,11 +600,11 @@ async fn node_serve_hash_seq() -> TestResult<()> {
     let mut tts = Vec::new();
     // add all the sizes
     for size in sizes {
-        let tt = store.add_bytes(test_data(size)).await?;
+        let tt = store.blobs().add_bytes(test_data(size)).await?;
         tts.push(tt);
     }
     let hash_seq = tts.iter().map(|x| x.hash).collect::<HashSeq>();
-    let root_tt = store.add_bytes(hash_seq).await?;
+    let root_tt = store.blobs().add_bytes(hash_seq).await?;
     let root = root_tt.hash;
     let endpoint = Endpoint::builder().discovery_n0().bind().await?;
     let blobs = crate::net_protocol::BlobsProtocol::new(&store, endpoint.clone(), None);
@@ -633,7 +635,7 @@ async fn node_serve_blobs() -> TestResult<()> {
     // add all the sizes
     let mut tts = Vec::new();
     for size in sizes {
-        tts.push(store.add_bytes(test_data(size)).await?);
+        tts.push(store.blobs().add_bytes(test_data(size)).await?);
     }
     let endpoint = Endpoint::builder().discovery_n0().bind().await?;
     let blobs = crate::net_protocol::BlobsProtocol::new(&store, endpoint.clone(), None);
@@ -675,7 +677,7 @@ async fn node_smoke_mem() -> TestResult<()> {
 }
 
 async fn node_smoke(store: &Store) -> TestResult<()> {
-    let tt = store.add_bytes(b"hello world".to_vec()).temp_tag().await?;
+    let tt = store.blobs().add_bytes(b"hello world".to_vec()).temp_tag().await?;
     let hash = *tt.hash();
     let endpoint = Endpoint::builder().discovery_n0().bind().await?;
     let blobs = crate::net_protocol::BlobsProtocol::new(store, endpoint.clone(), None);
@@ -703,7 +705,7 @@ async fn test_export_chunk() -> TestResult {
     let blobs = store.blobs();
     for size in [1024 * 18 + 1] {
         let data = vec![0u8; size];
-        let tt = store.add_slice(&data).temp_tag().await?;
+        let tt = blobs.add_slice(&data).temp_tag().await?;
         let hash = *tt.hash();
         let c = blobs.export_chunk(hash, 0).await;
         println!("{c:?}");
@@ -720,6 +722,7 @@ async fn test_export_ranges(
     range: Range<u64>,
 ) -> TestResult {
     let actual = store
+        .blobs()
         .export_ranges(hash, range.clone())
         .concatenate()
         .await?;
@@ -749,7 +752,7 @@ async fn export_ranges_smoke(store: &Store) -> TestResult {
     let sizes = INTERESTING_SIZES;
     for size in sizes {
         let data = test_data(size);
-        let tt = store.add_bytes(data.clone()).await?;
+        let tt = store.blobs().add_bytes(data.clone()).await?;
         let hash = tt.hash;
         let size = size as u64;
         test_export_ranges(store, hash, &data, 0..size).await?;