From 35ce7805230ac7732a1bf3213be5424a1e019a44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BCdiger=20Klaehn?= Date: Fri, 7 Jun 2024 14:10:05 +0300 Subject: [PATCH] refactor(iroh)!: replace public fields in iroh client with accessors and use ref-cast to eliminate them entirely (#2350) ## Description With 4 different clients, the current approach might be OK. But we are going to have more. E.g. gossip, see https://github.com/n0-computer/iroh/pull/2258 . And in any case it feels weird to store the same thing multiple times. So this replaces public fields in the iroh client with accessors and uses ref-cast to eliminate them entirely. There is now only one rpc field, and you can switch from that to the different subsystem client views without runtime overhead, not even an arc clone. ## Breaking Changes Everything that uses the iroh client will have to switch from field accesses to accessor fns. ## Notes & open questions ## Change checklist - [ ] Self-review. - [ ] Documentation updates if relevant. - [ ] Tests if relevant. - [ ] All breaking changes documented. 
--- iroh-cli/src/commands/author.rs | 12 ++-- iroh-cli/src/commands/blob.rs | 30 +++++----- iroh-cli/src/commands/doc.rs | 16 +++--- iroh-cli/src/commands/tag.rs | 4 +- iroh-cli/src/config.rs | 2 +- iroh/examples/client.rs | 4 +- iroh/examples/collection-fetch.rs | 6 +- iroh/examples/collection-provide.rs | 8 +-- iroh/examples/hello-world-fetch.rs | 4 +- iroh/examples/hello-world-provide.rs | 4 +- iroh/src/client.rs | 29 ++++++++++ iroh/src/client/authors.rs | 24 ++++---- iroh/src/client/blobs.rs | 42 +++++++------- iroh/src/client/docs.rs | 6 +- iroh/src/node.rs | 59 ++++++++++---------- iroh/tests/gc.rs | 4 +- iroh/tests/provide.rs | 2 +- iroh/tests/sync.rs | 82 ++++++++++++++-------------- 18 files changed, 185 insertions(+), 153 deletions(-) diff --git a/iroh-cli/src/commands/author.rs b/iroh-cli/src/commands/author.rs index 8da797845cb..2ad98a48b6b 100644 --- a/iroh-cli/src/commands/author.rs +++ b/iroh-cli/src/commands/author.rs @@ -48,7 +48,7 @@ impl AuthorCommands { println!("Active author is now {}", fmt_short(author.as_bytes())); } Self::List => { - let mut stream = iroh.authors.list().await?; + let mut stream = iroh.authors().list().await?; while let Some(author_id) = stream.try_next().await? 
{ println!("{}", author_id); } @@ -57,7 +57,7 @@ impl AuthorCommands { if switch && !env.is_console() { bail!("The --switch flag is only supported within the Iroh console."); } - let author_id = iroh.authors.default().await?; + let author_id = iroh.authors().default().await?; println!("{}", author_id); if switch { env.set_author(author_id)?; @@ -69,7 +69,7 @@ impl AuthorCommands { bail!("The --switch flag is only supported within the Iroh console."); } - let author_id = iroh.authors.create().await?; + let author_id = iroh.authors().create().await?; println!("{}", author_id); if switch { @@ -78,10 +78,10 @@ impl AuthorCommands { } } Self::Delete { author } => { - iroh.authors.delete(author).await?; + iroh.authors().delete(author).await?; println!("Deleted author {}", fmt_short(author.as_bytes())); } - Self::Export { author } => match iroh.authors.export(author).await? { + Self::Export { author } => match iroh.authors().export(author).await? { Some(author) => { println!("{}", author); } @@ -92,7 +92,7 @@ impl AuthorCommands { Self::Import { author } => match Author::from_str(&author) { Ok(author) => { let id = author.id(); - iroh.authors.import(author).await?; + iroh.authors().import(author).await?; println!("Imported {}", fmt_short(id)); } Err(err) => { diff --git a/iroh-cli/src/commands/blob.rs b/iroh-cli/src/commands/blob.rs index cb1a9fb2e6e..9e2c7208c62 100644 --- a/iroh-cli/src/commands/blob.rs +++ b/iroh-cli/src/commands/blob.rs @@ -262,7 +262,7 @@ impl BlobCommands { }; let mut stream = iroh - .blobs + .blobs() .download_with_opts( hash, DownloadOptions { @@ -281,7 +281,7 @@ impl BlobCommands { Some(OutputTarget::Stdout) => { // we asserted above that `OutputTarget::Stdout` is only permitted if getting a // single hash and not a hashseq. 
- let mut blob_read = iroh.blobs.read(hash).await?; + let mut blob_read = iroh.blobs().read(hash).await?; tokio::io::copy(&mut blob_read, &mut tokio::io::stdout()).await?; } Some(OutputTarget::Path(path)) => { @@ -299,7 +299,7 @@ impl BlobCommands { false => ExportFormat::Blob, }; tracing::info!("exporting to {} -> {}", path.display(), absolute.display()); - let stream = iroh.blobs.export(hash, absolute, format, mode).await?; + let stream = iroh.blobs().export(hash, absolute, format, mode).await?; // TODO: report export progress stream.await?; @@ -320,7 +320,7 @@ impl BlobCommands { !recursive, "Recursive option is not supported when exporting to STDOUT" ); - let mut blob_read = iroh.blobs.read(hash).await?; + let mut blob_read = iroh.blobs().read(hash).await?; tokio::io::copy(&mut blob_read, &mut tokio::io::stdout()).await?; } OutputTarget::Path(path) => { @@ -341,7 +341,7 @@ impl BlobCommands { path.display(), absolute.display() ); - let stream = iroh.blobs.export(hash, absolute, format, mode).await?; + let stream = iroh.blobs().export(hash, absolute, format, mode).await?; // TODO: report export progress stream.await?; } @@ -369,8 +369,8 @@ impl BlobCommands { } else { BlobFormat::Raw }; - let status = iroh.blobs.status(hash).await?; - let ticket = iroh.blobs.share(hash, format, addr_options).await?; + let status = iroh.blobs().status(hash).await?; + let ticket = iroh.blobs().share(hash, format, addr_options).await?; let (blob_status, size) = match (status, format) { (BlobStatus::Complete { size }, BlobFormat::Raw) => ("blob", size), @@ -453,21 +453,21 @@ impl ListCommands { { match self { Self::Blobs => { - let mut response = iroh.blobs.list().await?; + let mut response = iroh.blobs().list().await?; while let Some(item) = response.next().await { let BlobInfo { path, hash, size } = item?; println!("{} {} ({})", path, hash, HumanBytes(size)); } } Self::IncompleteBlobs => { - let mut response = iroh.blobs.list_incomplete().await?; + let mut response = 
iroh.blobs().list_incomplete().await?; while let Some(item) = response.next().await { let IncompleteBlobInfo { hash, size, .. } = item?; println!("{} ({})", hash, HumanBytes(size)); } } Self::Collections => { - let mut response = iroh.blobs.list_collections()?; + let mut response = iroh.blobs().list_collections()?; while let Some(item) = response.next().await { let CollectionInfo { tag, @@ -513,7 +513,7 @@ impl DeleteCommands { { match self { Self::Blob { hash } => { - let response = iroh.blobs.delete_blob(hash).await; + let response = iroh.blobs().delete_blob(hash).await; if let Err(e) = response { eprintln!("Error: {}", e); } @@ -544,7 +544,7 @@ pub async fn consistency_check(iroh: &Iroh, verbose: u8, repair: bool) -> where C: ServiceConnection, { - let mut response = iroh.blobs.consistency_check(repair).await?; + let mut response = iroh.blobs().consistency_check(repair).await?; let verbosity = get_report_level(verbose); let print = |level: ReportLevel, entry: Option, message: String| { if level < verbosity { @@ -589,7 +589,7 @@ where C: ServiceConnection, { let mut state = ValidateProgressState::new(); - let mut response = iroh.blobs.validate(repair).await?; + let mut response = iroh.blobs().validate(repair).await?; let verbosity = get_report_level(verbose); let print = |level: ReportLevel, entry: Option, message: String| { if level < verbosity { @@ -854,7 +854,7 @@ pub async fn add>( // tell the node to add the data let stream = client - .blobs + .blobs() .add_from_path(absolute, in_place, tag, wrap) .await?; aggregate_add_response(stream).await? @@ -872,7 +872,7 @@ pub async fn add>( // tell the node to add the data let stream = client - .blobs + .blobs() .add_from_path(path_buf, false, tag, wrap) .await?; aggregate_add_response(stream).await? 
diff --git a/iroh-cli/src/commands/doc.rs b/iroh-cli/src/commands/doc.rs index 7c6465b5923..b2a13b3596d 100644 --- a/iroh-cli/src/commands/doc.rs +++ b/iroh-cli/src/commands/doc.rs @@ -317,7 +317,7 @@ impl DocCommands { bail!("The --switch flag is only supported within the Iroh console."); } - let doc = iroh.docs.create().await?; + let doc = iroh.docs().create().await?; println!("{}", doc.id()); if switch { @@ -330,7 +330,7 @@ impl DocCommands { bail!("The --switch flag is only supported within the Iroh console."); } - let doc = iroh.docs.import(ticket).await?; + let doc = iroh.docs().import(ticket).await?; println!("{}", doc.id()); if switch { @@ -339,7 +339,7 @@ impl DocCommands { } } Self::List => { - let mut stream = iroh.docs.list().await?; + let mut stream = iroh.docs().list().await?; while let Some((id, kind)) = stream.try_next().await? { println!("{id} {kind}") } @@ -483,7 +483,7 @@ impl DocCommands { } let stream = iroh - .blobs + .blobs() .add_from_path( root.clone(), in_place, @@ -627,7 +627,7 @@ impl DocCommands { .interact() .unwrap_or(false) { - iroh.docs.drop_doc(doc.id()).await?; + iroh.docs().drop_doc(doc.id()).await?; println!("Doc {} has been deleted.", fmt_short(doc.id())); } else { println!("Aborted.") @@ -681,7 +681,7 @@ async fn get_doc( where C: ServiceConnection, { - iroh.docs + iroh.docs() .open(env.doc(id)?) .await? 
.context("Document not found") @@ -975,8 +975,8 @@ mod tests { let node = crate::commands::start::start_node(data_dir.path(), None).await?; let client = node.client(); - let doc = client.docs.create().await.context("doc create")?; - let author = client.authors.create().await.context("author create")?; + let doc = client.docs().create().await.context("doc create")?; + let author = client.authors().create().await.context("author create")?; // set up command, getting iroh node let cli = ConsoleEnv::for_console(data_dir.path().to_owned(), &node) diff --git a/iroh-cli/src/commands/tag.rs b/iroh-cli/src/commands/tag.rs index 3d995d5a522..42c228266b2 100644 --- a/iroh-cli/src/commands/tag.rs +++ b/iroh-cli/src/commands/tag.rs @@ -26,7 +26,7 @@ impl TagCommands { { match self { Self::List => { - let mut response = iroh.tags.list().await?; + let mut response = iroh.tags().list().await?; while let Some(res) = response.next().await { let res = res?; println!("{}: {} ({:?})", res.name, res.hash, res.format); @@ -38,7 +38,7 @@ impl TagCommands { } else { Tag::from(tag) }; - iroh.tags.delete(tag).await?; + iroh.tags().delete(tag).await?; } } Ok(()) diff --git a/iroh-cli/src/config.rs b/iroh-cli/src/config.rs index 861c2ec5ad8..249b91af103 100644 --- a/iroh-cli/src/config.rs +++ b/iroh-cli/src/config.rs @@ -293,7 +293,7 @@ async fn env_author>( { Ok(author) } else { - iroh.authors.default().await + iroh.authors().default().await } } diff --git a/iroh/examples/client.rs b/iroh/examples/client.rs index 3e4d018aed9..0e04a91c305 100644 --- a/iroh/examples/client.rs +++ b/iroh/examples/client.rs @@ -16,8 +16,8 @@ async fn main() -> anyhow::Result<()> { // Could also use `node` directly, as it derefs to the client. 
let client = node.client(); - let doc = client.docs.create().await?; - let author = client.authors.default().await?; + let doc = client.docs().create().await?; + let author = client.authors().default().await?; doc.set_bytes(author, "hello", "world").await?; diff --git a/iroh/examples/collection-fetch.rs b/iroh/examples/collection-fetch.rs index e35f61ba95a..c827f13cdc4 100644 --- a/iroh/examples/collection-fetch.rs +++ b/iroh/examples/collection-fetch.rs @@ -59,7 +59,7 @@ async fn main() -> Result<()> { // `download` returns a stream of `DownloadProgress` events. You can iterate through these updates to get progress // on the state of your download. let download_stream = node - .blobs + .blobs() .download_hash_seq(ticket.hash(), ticket.node_addr().clone()) .await?; @@ -76,7 +76,7 @@ async fn main() -> Result<()> { // A `Collection` is a special `HashSeq`, where we preserve the names of any blobs added to the collection. (We do this by designating the first entry in the `Collection` as meta data.) // To get the content of the collection, we first get the collection from the database using the `blobs` API let collection = node - .blobs + .blobs() .get_collection(ticket.hash()) .await .context("expect hash with `BlobFormat::HashSeq` to be a collection")?; @@ -85,7 +85,7 @@ async fn main() -> Result<()> { for (name, hash) in collection.iter() { println!("\nname: {name}, hash: {hash}"); // Use the hash of the blob to get the content. 
- let content = node.blobs.read_to_bytes(*hash).await?; + let content = node.blobs().read_to_bytes(*hash).await?; let s = std::str::from_utf8(&content).context("unable to parse blob as as utf-8 string")?; println!("{s}"); } diff --git a/iroh/examples/collection-provide.rs b/iroh/examples/collection-provide.rs index 37f05da5454..867b2ac5e3e 100644 --- a/iroh/examples/collection-provide.rs +++ b/iroh/examples/collection-provide.rs @@ -27,8 +27,8 @@ async fn main() -> anyhow::Result<()> { let node = iroh::node::Node::memory().spawn().await?; // Add two blobs - let blob1 = node.blobs.add_bytes("the first blob of bytes").await?; - let blob2 = node.blobs.add_bytes("the second blob of bytes").await?; + let blob1 = node.blobs().add_bytes("the first blob of bytes").await?; + let blob2 = node.blobs().add_bytes("the second blob of bytes").await?; // Create blobs from the data let collection: Collection = [("blob1", blob1.hash), ("blob2", blob2.hash)] @@ -37,14 +37,14 @@ async fn main() -> anyhow::Result<()> { // Create a collection let (hash, _) = node - .blobs + .blobs() .create_collection(collection, SetTagOption::Auto, Default::default()) .await?; // create a ticket // tickets wrap all details needed to get a collection let ticket = node - .blobs + .blobs() .share(hash, BlobFormat::HashSeq, Default::default()) .await?; diff --git a/iroh/examples/hello-world-fetch.rs b/iroh/examples/hello-world-fetch.rs index 71672845a81..06578b71eb1 100644 --- a/iroh/examples/hello-world-fetch.rs +++ b/iroh/examples/hello-world-fetch.rs @@ -59,7 +59,7 @@ async fn main() -> Result<()> { // `download` returns a stream of `DownloadProgress` events. You can iterate through these updates to get progress // on the state of your download. let download_stream = node - .blobs + .blobs() .download(ticket.hash(), ticket.node_addr().clone()) .await?; @@ -74,7 +74,7 @@ async fn main() -> Result<()> { // Get the content we have just fetched from the iroh database. 
- let bytes = node.blobs.read_to_bytes(ticket.hash()).await?; + let bytes = node.blobs().read_to_bytes(ticket.hash()).await?; let s = std::str::from_utf8(&bytes).context("unable to parse blob as as utf-8 string")?; println!("{s}"); diff --git a/iroh/examples/hello-world-provide.rs b/iroh/examples/hello-world-provide.rs index 14be61aef5b..8fe0e9c12a8 100644 --- a/iroh/examples/hello-world-provide.rs +++ b/iroh/examples/hello-world-provide.rs @@ -23,11 +23,11 @@ async fn main() -> anyhow::Result<()> { let node = iroh::node::Node::memory().spawn().await?; // add some data and remember the hash - let res = node.blobs.add_bytes("Hello, world!").await?; + let res = node.blobs().add_bytes("Hello, world!").await?; // create a ticket let ticket = node - .blobs + .blobs() .share(res.hash, res.format, Default::default()) .await?; diff --git a/iroh/src/client.rs b/iroh/src/client.rs index 7cad7c55176..66eb926a264 100644 --- a/iroh/src/client.rs +++ b/iroh/src/client.rs @@ -26,12 +26,16 @@ mod node; #[derive(Debug, Clone)] pub struct Iroh { /// Client for blobs operations. + #[deprecated(note = "Use `blobs` method instead", since = "0.18.0")] pub blobs: blobs::Client, /// Client for docs operations. + #[deprecated(note = "Use `docs` method instead", since = "0.18.0")] pub docs: docs::Client, /// Client for author operations. + #[deprecated(note = "Use `authors` method instead", since = "0.18.0")] pub authors: authors::Client, /// Client for tags operations. + #[deprecated(note = "Use `tags` method instead", since = "0.18.0")] pub tags: tags::Client, rpc: RpcClient, @@ -43,6 +47,7 @@ where { /// Create a new high-level client to a Iroh node from the low-level RPC client. pub fn new(rpc: RpcClient) -> Self { + #[allow(deprecated)] Self { blobs: blobs::Client { rpc: rpc.clone() }, docs: docs::Client { rpc: rpc.clone() }, @@ -51,6 +56,30 @@ where rpc, } } + + /// Client for blobs operations. 
+ pub fn blobs(&self) -> &blobs::Client { + #[allow(deprecated)] + &self.blobs + } + + /// Client for docs operations. + pub fn docs(&self) -> &docs::Client { + #[allow(deprecated)] + &self.docs + } + + /// Client for author operations. + pub fn authors(&self) -> &authors::Client { + #[allow(deprecated)] + &self.authors + } + + /// Client for tags operations. + pub fn tags(&self) -> &tags::Client { + #[allow(deprecated)] + &self.tags + } } fn flatten( diff --git a/iroh/src/client/authors.rs b/iroh/src/client/authors.rs index b695b3da7c1..e6bddbb494b 100644 --- a/iroh/src/client/authors.rs +++ b/iroh/src/client/authors.rs @@ -101,33 +101,33 @@ mod tests { let node = Node::memory().spawn().await?; // default author always exists - let authors: Vec<_> = node.authors.list().await?.try_collect().await?; + let authors: Vec<_> = node.authors().list().await?.try_collect().await?; assert_eq!(authors.len(), 1); - let default_author = node.authors.default().await?; + let default_author = node.authors().default().await?; assert_eq!(authors, vec![default_author]); - let author_id = node.authors.create().await?; + let author_id = node.authors().create().await?; - let authors: Vec<_> = node.authors.list().await?.try_collect().await?; + let authors: Vec<_> = node.authors().list().await?.try_collect().await?; assert_eq!(authors.len(), 2); let author = node - .authors + .authors() .export(author_id) .await? 
.expect("should have author"); - node.authors.delete(author_id).await?; - let authors: Vec<_> = node.authors.list().await?.try_collect().await?; + node.authors().delete(author_id).await?; + let authors: Vec<_> = node.authors().list().await?.try_collect().await?; assert_eq!(authors.len(), 1); - node.authors.import(author).await?; + node.authors().import(author).await?; - let authors: Vec<_> = node.authors.list().await?.try_collect().await?; + let authors: Vec<_> = node.authors().list().await?.try_collect().await?; assert_eq!(authors.len(), 2); - assert!(node.authors.default().await? != author_id); - node.authors.set_default(author_id).await?; - assert_eq!(node.authors.default().await?, author_id); + assert!(node.authors().default().await? != author_id); + node.authors().set_default(author_id).await?; + assert_eq!(node.authors().default().await?, author_id); Ok(()) } diff --git a/iroh/src/client/blobs.rs b/iroh/src/client/blobs.rs index e1e98cae2ef..b887edf9fe7 100644 --- a/iroh/src/client/blobs.rs +++ b/iroh/src/client/blobs.rs @@ -47,7 +47,7 @@ pub struct Client { impl<'a, C: ServiceConnection> From<&'a Iroh> for &'a RpcClient { fn from(client: &'a Iroh) -> &'a RpcClient { - &client.blobs.rpc + &client.blobs().rpc } } @@ -936,7 +936,7 @@ mod tests { // import files for path in &paths { let import_outcome = client - .blobs + .blobs() .add_from_path( path.to_path_buf(), false, @@ -957,11 +957,11 @@ mod tests { } let (hash, tag) = client - .blobs + .blobs() .create_collection(collection, SetTagOption::Auto, tags) .await?; - let collections: Vec<_> = client.blobs.list_collections()?.try_collect().await?; + let collections: Vec<_> = client.blobs().list_collections()?.try_collect().await?; assert_eq!(collections.len(), 1); { @@ -978,7 +978,7 @@ mod tests { } // check that "temp" tags have been deleted - let tags: Vec<_> = client.tags.list().await?.try_collect().await?; + let tags: Vec<_> = client.tags().list().await?.try_collect().await?; assert_eq!(tags.len(), 1); 
assert_eq!(tags[0].hash, hash); assert_eq!(tags[0].name, tag); @@ -1013,7 +1013,7 @@ mod tests { let client = node.client(); let import_outcome = client - .blobs + .blobs() .add_from_path( path.to_path_buf(), false, @@ -1029,28 +1029,28 @@ mod tests { let hash = import_outcome.hash; // Read everything - let res = client.blobs.read_to_bytes(hash).await?; + let res = client.blobs().read_to_bytes(hash).await?; assert_eq!(&res, &buf[..]); // Read at smaller than blob_get_chunk_size - let res = client.blobs.read_at_to_bytes(hash, 0, Some(100)).await?; + let res = client.blobs().read_at_to_bytes(hash, 0, Some(100)).await?; assert_eq!(res.len(), 100); assert_eq!(&res[..], &buf[0..100]); - let res = client.blobs.read_at_to_bytes(hash, 20, Some(120)).await?; + let res = client.blobs().read_at_to_bytes(hash, 20, Some(120)).await?; assert_eq!(res.len(), 120); assert_eq!(&res[..], &buf[20..140]); // Read at equal to blob_get_chunk_size let res = client - .blobs + .blobs() .read_at_to_bytes(hash, 0, Some(1024 * 64)) .await?; assert_eq!(res.len(), 1024 * 64); assert_eq!(&res[..], &buf[0..1024 * 64]); let res = client - .blobs + .blobs() .read_at_to_bytes(hash, 20, Some(1024 * 64)) .await?; assert_eq!(res.len(), 1024 * 64); @@ -1058,26 +1058,26 @@ mod tests { // Read at larger than blob_get_chunk_size let res = client - .blobs + .blobs() .read_at_to_bytes(hash, 0, Some(10 + 1024 * 64)) .await?; assert_eq!(res.len(), 10 + 1024 * 64); assert_eq!(&res[..], &buf[0..(10 + 1024 * 64)]); let res = client - .blobs + .blobs() .read_at_to_bytes(hash, 20, Some(10 + 1024 * 64)) .await?; assert_eq!(res.len(), 10 + 1024 * 64); assert_eq!(&res[..], &buf[20..(20 + 10 + 1024 * 64)]); // full length - let res = client.blobs.read_at_to_bytes(hash, 20, None).await?; + let res = client.blobs().read_at_to_bytes(hash, 20, None).await?; assert_eq!(res.len(), 1024 * 128 - 20); assert_eq!(&res[..], &buf[20..]); // size should be total - let reader = client.blobs.read_at(hash, 0, Some(20)).await?; + let 
reader = client.blobs().read_at(hash, 0, Some(20)).await?; assert_eq!(reader.size(), 1024 * 128); assert_eq!(reader.response_size, 20); @@ -1119,7 +1119,7 @@ mod tests { // import files for path in &paths { let import_outcome = client - .blobs + .blobs() .add_from_path( path.to_path_buf(), false, @@ -1140,11 +1140,11 @@ mod tests { } let (hash, _tag) = client - .blobs + .blobs() .create_collection(collection, SetTagOption::Auto, tags) .await?; - let collection = client.blobs.get_collection(hash).await?; + let collection = client.blobs().get_collection(hash).await?; // 5 blobs assert_eq!(collection.len(), 5); @@ -1178,7 +1178,7 @@ mod tests { let client = node.client(); let import_outcome = client - .blobs + .blobs() .add_from_path( path.to_path_buf(), false, @@ -1192,12 +1192,12 @@ mod tests { .context("import finish")?; let ticket = client - .blobs + .blobs() .share(import_outcome.hash, BlobFormat::Raw, Default::default()) .await?; assert_eq!(ticket.hash(), import_outcome.hash); - let status = client.blobs.status(import_outcome.hash).await?; + let status = client.blobs().status(import_outcome.hash).await?; assert_eq!(status, BlobStatus::Complete { size }); Ok(()) diff --git a/iroh/src/client/docs.rs b/iroh/src/client/docs.rs index 2a35233eba6..77e73244117 100644 --- a/iroh/src/client/docs.rs +++ b/iroh/src/client/docs.rs @@ -768,7 +768,7 @@ mod tests { let node = crate::node::Node::memory().spawn().await?; let client = node.client(); - let doc = client.docs.create().await?; + let doc = client.docs().create().await?; let res = std::thread::spawn(move || { drop(doc); @@ -809,8 +809,8 @@ mod tests { // create doc & author let client = node.client(); - let doc = client.docs.create().await.context("doc create")?; - let author = client.authors.create().await.context("author create")?; + let doc = client.docs().create().await.context("doc create")?; + let author = client.authors().create().await.context("author create")?; // import file let import_outcome = doc diff 
--git a/iroh/src/node.rs b/iroh/src/node.rs index 7e0c6c29757..3b9173c7062 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -228,7 +228,7 @@ mod tests { let node = Node::memory().spawn().await.unwrap(); let hash = node .client() - .blobs + .blobs() .add_bytes(Bytes::from_static(b"hello")) .await .unwrap() @@ -236,7 +236,7 @@ mod tests { let _drop_guard = node.cancel_token().drop_guard(); let ticket = node - .blobs + .blobs() .share(hash, BlobFormat::Raw, AddrInfoOptions::RelayAndAddresses) .await .unwrap(); @@ -255,10 +255,13 @@ mod tests { let client = node.client(); let input = vec![2u8; 1024 * 256]; // 265kb so actually streaming, chunk size is 64kb let reader = Cursor::new(input.clone()); - let progress = client.blobs.add_reader(reader, SetTagOption::Auto).await?; + let progress = client + .blobs() + .add_reader(reader, SetTagOption::Auto) + .await?; let outcome = progress.finish().await?; let hash = outcome.hash; - let output = client.blobs.read_to_bytes(hash).await?; + let output = client.blobs().read_to_bytes(hash).await?; assert_eq!(input, output.to_vec()); Ok(()) } @@ -312,13 +315,13 @@ mod tests { let iroh_root = tempfile::TempDir::new()?; { let iroh = Node::persistent(iroh_root.path()).await?.spawn().await?; - let doc = iroh.docs.create().await?; + let doc = iroh.docs().create().await?; drop(doc); iroh.shutdown().await?; } let iroh = Node::persistent(iroh_root.path()).await?.spawn().await?; - let _doc = iroh.docs.create().await?; + let _doc = iroh.docs().create().await?; Ok(()) } @@ -340,14 +343,14 @@ mod tests { .insecure_skip_relay_cert_verify(true) .spawn() .await?; - let AddOutcome { hash, .. } = node1.blobs.add_bytes(b"foo".to_vec()).await?; + let AddOutcome { hash, .. 
} = node1.blobs().add_bytes(b"foo".to_vec()).await?; // create a node addr with only a relay URL, no direct addresses let addr = NodeAddr::new(node1.node_id()).with_relay_url(relay_url); - node2.blobs.download(hash, addr).await?.await?; + node2.blobs().download(hash, addr).await?.await?; assert_eq!( node2 - .blobs + .blobs() .read_to_bytes(hash) .await .context("get")? @@ -383,14 +386,14 @@ mod tests { .node_discovery(dns_pkarr_server.discovery(secret2).into()) .spawn() .await?; - let hash = node1.blobs.add_bytes(b"foo".to_vec()).await?.hash; + let hash = node1.blobs().add_bytes(b"foo".to_vec()).await?.hash; // create a node addr with node id only let addr = NodeAddr::new(node1.node_id()); - node2.blobs.download(hash, addr).await?.await?; + node2.blobs().download(hash, addr).await?.await?; assert_eq!( node2 - .blobs + .blobs() .read_to_bytes(hash) .await .context("get")? @@ -403,9 +406,9 @@ mod tests { #[tokio::test] async fn test_default_author_memory() -> Result<()> { let iroh = Node::memory().spawn().await?; - let author = iroh.authors.default().await?; - assert!(iroh.authors.export(author).await?.is_some()); - assert!(iroh.authors.delete(author).await.is_err()); + let author = iroh.authors().default().await?; + assert!(iroh.authors().export(author).await?.is_some()); + assert!(iroh.authors().delete(author).await.is_err()); Ok(()) } @@ -427,9 +430,9 @@ mod tests { .spawn() .await .unwrap(); - let author = iroh.authors.default().await.unwrap(); - assert!(iroh.authors.export(author).await.unwrap().is_some()); - assert!(iroh.authors.delete(author).await.is_err()); + let author = iroh.authors().default().await.unwrap(); + assert!(iroh.authors().export(author).await.unwrap().is_some()); + assert!(iroh.authors().delete(author).await.is_err()); iroh.shutdown().await.unwrap(); author }; @@ -442,10 +445,10 @@ mod tests { .spawn() .await .unwrap(); - let author = iroh.authors.default().await.unwrap(); + let author = iroh.authors().default().await.unwrap(); 
assert_eq!(author, default_author); - assert!(iroh.authors.export(author).await.unwrap().is_some()); - assert!(iroh.authors.delete(author).await.is_err()); + assert!(iroh.authors().export(author).await.unwrap().is_some()); + assert!(iroh.authors().delete(author).await.is_err()); iroh.shutdown().await.unwrap(); }; @@ -461,10 +464,10 @@ mod tests { .spawn() .await .unwrap(); - let author = iroh.authors.default().await.unwrap(); + let author = iroh.authors().default().await.unwrap(); assert!(author != default_author); - assert!(iroh.authors.export(author).await.unwrap().is_some()); - assert!(iroh.authors.delete(author).await.is_err()); + assert!(iroh.authors().export(author).await.unwrap().is_some()); + assert!(iroh.authors().delete(author).await.is_err()); iroh.shutdown().await.unwrap(); author }; @@ -504,9 +507,9 @@ mod tests { .spawn() .await .unwrap(); - let author = iroh.authors.create().await.unwrap(); - iroh.authors.set_default(author).await.unwrap(); - assert_eq!(iroh.authors.default().await.unwrap(), author); + let author = iroh.authors().create().await.unwrap(); + iroh.authors().set_default(author).await.unwrap(); + assert_eq!(iroh.authors().default().await.unwrap(), author); iroh.shutdown().await.unwrap(); author }; @@ -517,7 +520,7 @@ mod tests { .spawn() .await .unwrap(); - assert_eq!(iroh.authors.default().await.unwrap(), default_author); + assert_eq!(iroh.authors().default().await.unwrap(), default_author); iroh.shutdown().await.unwrap(); } diff --git a/iroh/tests/gc.rs b/iroh/tests/gc.rs index 4c3c3fc26f3..dcca0893b54 100644 --- a/iroh/tests/gc.rs +++ b/iroh/tests/gc.rs @@ -232,8 +232,8 @@ mod file { let bao_store = iroh_blobs::store::fs::Store::load(dir.join("store")).await?; let (node, _) = wrap_in_node(bao_store.clone(), Duration::from_secs(10)).await; let client = node.client(); - let doc = client.docs.create().await?; - let author = client.authors.create().await?; + let doc = client.docs().create().await?; + let author = 
client.authors().create().await?; let temp_path = dir.join("temp"); tokio::fs::create_dir_all(&temp_path).await?; let mut to_import = Vec::new(); diff --git a/iroh/tests/provide.rs b/iroh/tests/provide.rs index a4f005fe588..13376273dd6 100644 --- a/iroh/tests/provide.rs +++ b/iroh/tests/provide.rs @@ -391,7 +391,7 @@ async fn test_run_ticket() { let _drop_guard = node.cancel_token().drop_guard(); let ticket = node - .blobs + .blobs() .share( hash, BlobFormat::HashSeq, diff --git a/iroh/tests/sync.rs b/iroh/tests/sync.rs index afa25915884..a5e9b8a463d 100644 --- a/iroh/tests/sync.rs +++ b/iroh/tests/sync.rs @@ -85,8 +85,8 @@ async fn sync_simple() -> Result<()> { // create doc on node0 let peer0 = nodes[0].node_id(); - let author0 = clients[0].authors.create().await?; - let doc0 = clients[0].docs.create().await?; + let author0 = clients[0].authors().create().await?; + let doc0 = clients[0].docs().create().await?; let hash0 = doc0 .set_bytes(author0, b"k1".to_vec(), b"v1".to_vec()) .await?; @@ -99,7 +99,7 @@ async fn sync_simple() -> Result<()> { info!("node1: join"); let peer1 = nodes[1].node_id(); - let doc1 = clients[1].docs.import(ticket.clone()).await?; + let doc1 = clients[1].docs().import(ticket.clone()).await?; let mut events1 = doc1.subscribe().await?; info!("node1: assert 4 events"); assert_next_unordered( @@ -140,9 +140,9 @@ async fn sync_subscribe_no_sync() -> Result<()> { setup_logging(); let node = spawn_node(0, &mut rng).await?; let client = node.client(); - let doc = client.docs.create().await?; + let doc = client.docs().create().await?; let mut sub = doc.subscribe().await?; - let author = client.authors.create().await?; + let author = client.authors().create().await?; doc.set_bytes(author, b"k".to_vec(), b"v".to_vec()).await?; let event = tokio::time::timeout(Duration::from_millis(100), sub.next()).await?; assert!( @@ -165,15 +165,15 @@ async fn sync_gossip_bulk() -> Result<()> { let clients = nodes.iter().map(|node| node.client()).collect::>(); let 
_peer0 = nodes[0].node_id(); - let author0 = clients[0].authors.create().await?; - let doc0 = clients[0].docs.create().await?; + let author0 = clients[0].authors().create().await?; + let doc0 = clients[0].docs().create().await?; let mut ticket = doc0 .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses) .await?; // unset peers to not yet start sync let peers = ticket.nodes.clone(); ticket.nodes = vec![]; - let doc1 = clients[1].docs.import(ticket).await?; + let doc1 = clients[1].docs().import(ticket).await?; let mut events = doc1.subscribe().await?; // create entries for initial sync. @@ -255,8 +255,8 @@ async fn sync_full_basic() -> Result<()> { // peer0: create doc and ticket let peer0 = nodes[0].node_id(); - let author0 = clients[0].authors.create().await?; - let doc0 = clients[0].docs.create().await?; + let author0 = clients[0].authors().create().await?; + let doc0 = clients[0].docs().create().await?; let mut events0 = doc0.subscribe().await?; let key0 = b"k1"; let value0 = b"v1"; @@ -277,9 +277,9 @@ async fn sync_full_basic() -> Result<()> { info!("peer1: spawn"); let peer1 = nodes[1].node_id(); - let author1 = clients[1].authors.create().await?; + let author1 = clients[1].authors().create().await?; info!("peer1: join doc"); - let doc1 = clients[1].docs.import(ticket.clone()).await?; + let doc1 = clients[1].docs().import(ticket.clone()).await?; info!("peer1: wait for 4 events (for sync and join with peer0)"); let mut events1 = doc1.subscribe().await?; @@ -345,7 +345,7 @@ async fn sync_full_basic() -> Result<()> { info!("peer2: spawn"); nodes.push(spawn_node(nodes.len(), &mut rng).await?); clients.push(nodes.last().unwrap().client().clone()); - let doc2 = clients[2].docs.import(ticket).await?; + let doc2 = clients[2].docs().import(ticket).await?; let peer2 = nodes[2].node_id(); let mut events2 = doc2.subscribe().await?; @@ -428,11 +428,11 @@ async fn sync_open_close() -> Result<()> { let node = spawn_node(0, &mut rng).await?; let client = node.client(); 
- let doc = client.docs.create().await?; + let doc = client.docs().create().await?; let status = doc.status().await?; assert_eq!(status.handles, 1); - let doc2 = client.docs.open(doc.id()).await?.unwrap(); + let doc2 = client.docs().open(doc.id()).await?.unwrap(); let status = doc2.status().await?; assert_eq!(status.handles, 2); @@ -452,8 +452,8 @@ async fn sync_subscribe_stop_close() -> Result<()> { let node = spawn_node(0, &mut rng).await?; let client = node.client(); - let doc = client.docs.create().await?; - let author = client.authors.create().await?; + let doc = client.docs().create().await?; + let author = client.authors().create().await?; let status = doc.status().await?; assert_eq!(status.subscribers, 0); @@ -504,8 +504,8 @@ async fn test_sync_via_relay() -> Result<()> { .spawn() .await?; - let doc1 = node1.docs.create().await?; - let author1 = node1.authors.create().await?; + let doc1 = node1.docs().create().await?; + let author1 = node1.authors().create().await?; let inserted_hash = doc1 .set_bytes(author1, b"foo".to_vec(), b"bar".to_vec()) .await?; @@ -517,7 +517,7 @@ async fn test_sync_via_relay() -> Result<()> { ticket.nodes[0].info.direct_addresses = Default::default(); // join - let doc2 = node2.docs.import(ticket).await?; + let doc2 = node2.docs().import(ticket).await?; let mut events = doc2.subscribe().await?; assert_next_unordered_with_optionals( @@ -598,7 +598,7 @@ async fn sync_restart_node() -> Result<()> { let id1 = node1.node_id(); // create doc & ticket on node1 - let doc1 = node1.docs.create().await?; + let doc1 = node1.docs().create().await?; let mut events1 = doc1.subscribe().await?; let ticket = doc1 .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses) @@ -615,8 +615,8 @@ async fn sync_restart_node() -> Result<()> { .spawn() .await?; let id2 = node2.node_id(); - let author2 = node2.authors.create().await?; - let doc2 = node2.docs.import(ticket.clone()).await?; + let author2 = node2.authors().create().await?; + let doc2 = 
node2.docs().import(ticket.clone()).await?; info!("node2 set a"); let hash_a = doc2.set_bytes(author2, "n2/a", "a").await?; @@ -662,7 +662,7 @@ async fn sync_restart_node() -> Result<()> { .await?; assert_eq!(id1, node1.node_id()); - let doc1 = node1.docs.open(doc1.id()).await?.expect("doc to exist"); + let doc1 = node1.docs().open(doc1.id()).await?.expect("doc to exist"); let mut events1 = doc1.subscribe().await?; assert_latest(&doc1, b"n2/a", b"a").await; @@ -748,14 +748,14 @@ async fn test_download_policies() -> Result<()> { let nodes = spawn_nodes(2, &mut rng).await?; let clients = nodes.iter().map(|node| node.client()).collect::>(); - let doc_a = clients[0].docs.create().await?; - let author_a = clients[0].authors.create().await?; + let doc_a = clients[0].docs().create().await?; + let author_a = clients[0].authors().create().await?; let ticket = doc_a .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses) .await?; - let doc_b = clients[1].docs.import(ticket).await?; - let author_b = clients[1].authors.create().await?; + let doc_b = clients[1].docs().import(ticket).await?; + let author_b = clients[1].authors().create().await?; doc_a.set_download_policy(policy_a).await?; doc_b.set_download_policy(policy_b).await?; @@ -871,9 +871,9 @@ async fn sync_big() -> Result<()> { let nodes = spawn_nodes(n_nodes, &mut rng).await?; let node_ids = nodes.iter().map(|node| node.node_id()).collect::>(); let clients = nodes.iter().map(|node| node.client()).collect::>(); - let authors = collect_futures(clients.iter().map(|c| c.authors.create())).await?; + let authors = collect_futures(clients.iter().map(|c| c.authors().create())).await?; - let doc0 = clients[0].docs.create().await?; + let doc0 = clients[0].docs().create().await?; let mut ticket = doc0 .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses) .await?; @@ -888,7 +888,7 @@ async fn sync_big() -> Result<()> { clients .iter() .skip(1) - .map(|c| c.docs.import(ticket.clone())), + .map(|c| 
c.docs().import(ticket.clone())), ) .await?, ); @@ -985,19 +985,19 @@ async fn test_list_docs_stream() -> Result<()> { // create docs for _i in 0..count { - let doc = node.docs.create().await?; + let doc = node.docs().create().await?; doc.close().await?; } // create doc stream - let mut stream = node.docs.list().await?; + let mut stream = node.docs().list().await?; // process each doc and call into the docs actor. // this makes sure that we don't deadlock the docs actor. let mut i = 0; let fut = async { while let Some((id, _)) = stream.try_next().await.unwrap() { - let _doc = node.docs.open(id).await.unwrap().unwrap(); + let _doc = node.docs().open(id).await.unwrap().unwrap(); i += 1; } }; @@ -1151,8 +1151,8 @@ async fn doc_delete() -> Result<()> { .spawn() .await?; let client = node.client(); - let doc = client.docs.create().await?; - let author = client.authors.create().await?; + let doc = client.docs().create().await?; + let author = client.authors().create().await?; let hash = doc .set_bytes(author, b"foo".to_vec(), b"hi".to_vec()) .await?; @@ -1166,7 +1166,7 @@ async fn doc_delete() -> Result<()> { // wait for gc // TODO: allow to manually trigger gc tokio::time::sleep(Duration::from_millis(200)).await; - let bytes = client.blobs.read_to_bytes(hash).await; + let bytes = client.blobs().read_to_bytes(hash).await; assert!(bytes.is_err()); node.shutdown().await?; Ok(()) @@ -1179,8 +1179,8 @@ async fn sync_drop_doc() -> Result<()> { let node = spawn_node(0, &mut rng).await?; let client = node.client(); - let doc = client.docs.create().await?; - let author = client.authors.create().await?; + let doc = client.docs().create().await?; + let author = client.authors().create().await?; let mut sub = doc.subscribe().await?; doc.set_bytes(author, b"foo".to_vec(), b"bar".to_vec()) @@ -1188,14 +1188,14 @@ async fn sync_drop_doc() -> Result<()> { let ev = sub.next().await; assert!(matches!(ev, Some(Ok(LiveEvent::InsertLocal { .. 
})))); - client.docs.drop_doc(doc.id()).await?; + client.docs().drop_doc(doc.id()).await?; let res = doc.get_exact(author, b"foo".to_vec(), true).await; assert!(res.is_err()); let res = doc .set_bytes(author, b"foo".to_vec(), b"bar".to_vec()) .await; assert!(res.is_err()); - let res = client.docs.open(doc.id()).await; + let res = client.docs().open(doc.id()).await; assert!(res.is_err()); let ev = sub.next().await; assert!(ev.is_none());