diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a147b81421..b5486b01aa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -99,7 +99,7 @@ jobs: # uses: obi1kenobi/cargo-semver-checks-action@v2 uses: n0-computer/cargo-semver-checks-action@feat-baseline with: - package: iroh, iroh-base, iroh-bytes, iroh-cli, iroh-dns-server, iroh-gossip, iroh-metrics, iroh-net, iroh-sync + package: iroh, iroh-base, iroh-blobs, iroh-cli, iroh-dns-server, iroh-gossip, iroh-metrics, iroh-net, iroh-docs baseline-rev: ${{ env.HEAD_COMMIT_SHA }} use-cache: false diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 7e57179506..badd64f42d 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -23,7 +23,7 @@ env: RUSTFLAGS: -Dwarnings RUSTDOCFLAGS: -Dwarnings SCCACHE_CACHE_SIZE: "50G" - CRATES_LIST: "iroh,iroh-bytes,iroh-gossip,iroh-metrics,iroh-net,iroh-sync,iroh-test,iroh-cli,iroh-dns-server" + CRATES_LIST: "iroh,iroh-blobs,iroh-gossip,iroh-metrics,iroh-net,iroh-docs,iroh-test,iroh-cli,iroh-dns-server" jobs: build_and_test_nix: @@ -66,7 +66,7 @@ jobs: uses: taiki-e/install-action@v2 with: tool: nextest - + - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.4 diff --git a/Cargo.lock b/Cargo.lock index 8fb2f0ec7a..d767585b3a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2314,12 +2314,12 @@ dependencies = [ "indicatif", "iroh", "iroh-base", - "iroh-bytes", + "iroh-blobs", + "iroh-docs", "iroh-gossip", "iroh-io", "iroh-metrics", "iroh-net", - "iroh-sync", "iroh-test", "num_cpus", "parking_lot", @@ -2389,7 +2389,7 @@ dependencies = [ ] [[package]] -name = "iroh-bytes" +name = "iroh-blobs" version = "0.15.0" dependencies = [ "anyhow", @@ -2406,7 +2406,7 @@ dependencies = [ "hex", "http-body 0.4.6", "iroh-base", - "iroh-bytes", + "iroh-blobs", "iroh-io", "iroh-metrics", "iroh-net", @@ -2536,6 +2536,44 @@ dependencies = [ "z32", ] +[[package]] +name = "iroh-docs" +version = "0.15.0" 
+dependencies = [ + "anyhow", + "bytes", + "derive_more", + "ed25519-dalek", + "flume", + "futures-util", + "hex", + "iroh-base", + "iroh-blake3", + "iroh-metrics", + "iroh-net", + "iroh-test", + "lru", + "num_enum", + "postcard", + "proptest", + "quinn", + "rand", + "rand_chacha", + "rand_core", + "redb 1.5.1", + "redb 2.1.0", + "self_cell", + "serde", + "strum 0.25.0", + "tempfile", + "test-strategy", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", +] + [[package]] name = "iroh-gossip" version = "0.15.0" @@ -2700,44 +2738,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "iroh-sync" -version = "0.15.0" -dependencies = [ - "anyhow", - "bytes", - "derive_more", - "ed25519-dalek", - "flume", - "futures-util", - "hex", - "iroh-base", - "iroh-blake3", - "iroh-metrics", - "iroh-net", - "iroh-test", - "lru", - "num_enum", - "postcard", - "proptest", - "quinn", - "rand", - "rand_chacha", - "rand_core", - "redb 1.5.1", - "redb 2.1.0", - "self_cell", - "serde", - "strum 0.25.0", - "tempfile", - "test-strategy", - "thiserror", - "tokio", - "tokio-stream", - "tokio-util", - "tracing", -] - [[package]] name = "iroh-test" version = "0.15.0" diff --git a/Cargo.toml b/Cargo.toml index 24dc3874d3..a6099c70fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,13 @@ [workspace] members = [ "iroh", - "iroh-bytes", + "iroh-blobs", "iroh-base", "iroh-dns-server", "iroh-gossip", "iroh-metrics", "iroh-net", - "iroh-sync", + "iroh-docs", "iroh-test", "iroh-net/bench", "iroh-cli" diff --git a/iroh-bytes/Cargo.toml b/iroh-blobs/Cargo.toml similarity index 97% rename from iroh-bytes/Cargo.toml rename to iroh-blobs/Cargo.toml index 42cd1b6787..d0dba588ce 100644 --- a/iroh-bytes/Cargo.toml +++ b/iroh-blobs/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "iroh-bytes" +name = "iroh-blobs" version = "0.15.0" edition = "2021" readme = "README.md" @@ -52,7 +52,7 @@ tracing-futures = "0.2.5" [dev-dependencies] http-body = "0.4.5" -iroh-bytes = { path = ".", 
features = ["downloader"] } +iroh-blobs = { path = ".", features = ["downloader"] } iroh-test = { path = "../iroh-test" } futures-buffered = "0.2.4" proptest = "1.0.0" diff --git a/iroh-bytes/README.md b/iroh-blobs/README.md similarity index 93% rename from iroh-bytes/README.md rename to iroh-blobs/README.md index a869953d10..0958f5ffd1 100644 --- a/iroh-bytes/README.md +++ b/iroh-blobs/README.md @@ -1,4 +1,4 @@ -# iroh-bytes +# iroh-blobs This crate provides blob and collection transfer support for iroh. It implements a simple request-response protocol based on blake3 verified streaming. @@ -23,7 +23,7 @@ This crate is usually used together with [iroh-net](https://crates.io/crates/iro ## Examples -Examples that use `iroh-bytes` can be found in the `iroh` crate. the iroh crate publishes `iroh_bytes` as `iroh::bytes`. +Examples that use `iroh-blobs` can be found in the `iroh` crate. the iroh crate publishes `iroh_blobs` as `iroh::blobs`. # License diff --git a/iroh-bytes/docs/img/get_machine.drawio b/iroh-blobs/docs/img/get_machine.drawio similarity index 100% rename from iroh-bytes/docs/img/get_machine.drawio rename to iroh-blobs/docs/img/get_machine.drawio diff --git a/iroh-bytes/docs/img/get_machine.drawio.svg b/iroh-blobs/docs/img/get_machine.drawio.svg similarity index 100% rename from iroh-bytes/docs/img/get_machine.drawio.svg rename to iroh-blobs/docs/img/get_machine.drawio.svg diff --git a/iroh-bytes/examples/connect/mod.rs b/iroh-blobs/examples/connect/mod.rs similarity index 100% rename from iroh-bytes/examples/connect/mod.rs rename to iroh-blobs/examples/connect/mod.rs diff --git a/iroh-bytes/examples/fetch-fsm.rs b/iroh-blobs/examples/fetch-fsm.rs similarity index 96% rename from iroh-bytes/examples/fetch-fsm.rs rename to iroh-blobs/examples/fetch-fsm.rs index a36644eaf0..d93d445021 100644 --- a/iroh-bytes/examples/fetch-fsm.rs +++ b/iroh-blobs/examples/fetch-fsm.rs @@ -9,7 +9,7 @@ use anyhow::{Context, Result}; use iroh_io::ConcatenateSliceWriter; use 
tracing_subscriber::{prelude::*, EnvFilter}; -use iroh_bytes::{ +use iroh_blobs::{ get::fsm::{AtInitial, ConnectedNext, EndBlobNext}, hashseq::HashSeq, protocol::GetRequest, @@ -64,14 +64,14 @@ async fn main() -> Result<()> { // create a request for a collection let request = GetRequest::all(hash); // create the initial state of the finite state machine - let initial = iroh_bytes::get::fsm::start(connection, request); + let initial = iroh_blobs::get::fsm::start(connection, request); write_collection(initial).await } else { // create a request for a single blob let request = GetRequest::single(hash); // create the initial state of the finite state machine - let initial = iroh_bytes::get::fsm::start(connection, request); + let initial = iroh_blobs::get::fsm::start(connection, request); write_blob(initial).await } @@ -120,7 +120,7 @@ async fn write_collection(initial: AtInitial) -> Result<()> { } // move to the header - let header: iroh_bytes::get::fsm::AtBlobHeader = start_root.next(); + let header: iroh_blobs::get::fsm::AtBlobHeader = start_root.next(); let (root_end, hashes_bytes) = header.concatenate_into_vec().await?; let next = root_end.next(); let EndBlobNext::MoreChildren(at_meta) = next else { diff --git a/iroh-bytes/examples/fetch-stream.rs b/iroh-blobs/examples/fetch-stream.rs similarity index 97% rename from iroh-bytes/examples/fetch-stream.rs rename to iroh-blobs/examples/fetch-stream.rs index e3ee545691..7bd51efe07 100644 --- a/iroh-bytes/examples/fetch-stream.rs +++ b/iroh-blobs/examples/fetch-stream.rs @@ -17,7 +17,7 @@ use genawaiter::sync::Co; use genawaiter::sync::Gen; use tokio::io::AsyncWriteExt; -use iroh_bytes::{ +use iroh_blobs::{ get::fsm::{AtInitial, BlobContentNext, ConnectedNext, EndBlobNext}, hashseq::HashSeq, protocol::GetRequest, @@ -73,7 +73,7 @@ async fn main() -> Result<()> { let request = GetRequest::all(hash); // create the initial state of the finite state machine - let initial = iroh_bytes::get::fsm::start(connection, request); + 
let initial = iroh_blobs::get::fsm::start(connection, request); // create a stream that yields all the data of the blob stream_children(initial).boxed_local() @@ -82,7 +82,7 @@ async fn main() -> Result<()> { let request = GetRequest::single(hash); // create the initial state of the finite state machine - let initial = iroh_bytes::get::fsm::start(connection, request); + let initial = iroh_blobs::get::fsm::start(connection, request); // create a stream that yields all the data of the blob stream_blob(initial).boxed_local() @@ -171,7 +171,7 @@ fn stream_children(initial: AtInitial) -> impl Stream> )); } // move to the header - let header: iroh_bytes::get::fsm::AtBlobHeader = start_root.next(); + let header: iroh_blobs::get::fsm::AtBlobHeader = start_root.next(); let (root_end, hashes_bytes) = header.concatenate_into_vec().await?; // parse the hashes from the hash sequence bytes diff --git a/iroh-bytes/examples/provide-bytes.rs b/iroh-blobs/examples/provide-bytes.rs similarity index 91% rename from iroh-bytes/examples/provide-bytes.rs rename to iroh-blobs/examples/provide-bytes.rs index d5bdaa3618..c355e38a91 100644 --- a/iroh-bytes/examples/provide-bytes.rs +++ b/iroh-blobs/examples/provide-bytes.rs @@ -13,7 +13,7 @@ use anyhow::Result; use tokio_util::task::LocalPoolHandle; use tracing_subscriber::{prelude::*, EnvFilter}; -use iroh_bytes::{format::collection::Collection, Hash}; +use iroh_blobs::{format::collection::Collection, Hash}; mod connect; use connect::{make_and_write_certs, make_server_endpoint, CERT_PATH}; @@ -47,7 +47,7 @@ async fn main() -> Result<()> { println!("\nprovide bytes {format} example!"); let (db, hash) = if format == "collection" { - let (mut db, names) = iroh_bytes::store::readonly_mem::Store::new([ + let (mut db, names) = iroh_blobs::store::readonly_mem::Store::new([ ("blob1", b"the first blob of bytes".to_vec()), ("blob2", b"the second blob of bytes".to_vec()), ]); // create a collection @@ -61,7 +61,7 @@ async fn main() -> Result<()> { } 
else { // create a new database and add a blob let (db, names) = - iroh_bytes::store::readonly_mem::Store::new([("hello", b"Hello World!".to_vec())]); + iroh_blobs::store::readonly_mem::Store::new([("hello", b"Hello World!".to_vec())]); // get the hash of the content let hash = names.get("hello").unwrap(); @@ -92,7 +92,7 @@ async fn main() -> Result<()> { // spawn a task to handle the connection tokio::spawn(async move { - iroh_bytes::provider::handle_connection(conn, db, MockEventSender, lp).await + iroh_blobs::provider::handle_connection(conn, db, MockEventSender, lp).await }); } }); @@ -112,8 +112,8 @@ struct MockEventSender; use futures_lite::future::FutureExt; -impl iroh_bytes::provider::EventSender for MockEventSender { - fn send(&self, _event: iroh_bytes::provider::Event) -> futures_lite::future::Boxed<()> { +impl iroh_blobs::provider::EventSender for MockEventSender { + fn send(&self, _event: iroh_blobs::provider::Event) -> futures_lite::future::Boxed<()> { async move {}.boxed() } } diff --git a/iroh-bytes/proptest-regressions/protocol/range_spec.txt b/iroh-blobs/proptest-regressions/protocol/range_spec.txt similarity index 100% rename from iroh-bytes/proptest-regressions/protocol/range_spec.txt rename to iroh-blobs/proptest-regressions/protocol/range_spec.txt diff --git a/iroh-bytes/proptest-regressions/provider.txt b/iroh-blobs/proptest-regressions/provider.txt similarity index 100% rename from iroh-bytes/proptest-regressions/provider.txt rename to iroh-blobs/proptest-regressions/provider.txt diff --git a/iroh-bytes/src/downloader.rs b/iroh-blobs/src/downloader.rs similarity index 100% rename from iroh-bytes/src/downloader.rs rename to iroh-blobs/src/downloader.rs diff --git a/iroh-bytes/src/downloader/get.rs b/iroh-blobs/src/downloader/get.rs similarity index 100% rename from iroh-bytes/src/downloader/get.rs rename to iroh-blobs/src/downloader/get.rs diff --git a/iroh-bytes/src/downloader/invariants.rs b/iroh-blobs/src/downloader/invariants.rs similarity 
index 100% rename from iroh-bytes/src/downloader/invariants.rs rename to iroh-blobs/src/downloader/invariants.rs diff --git a/iroh-bytes/src/downloader/progress.rs b/iroh-blobs/src/downloader/progress.rs similarity index 100% rename from iroh-bytes/src/downloader/progress.rs rename to iroh-blobs/src/downloader/progress.rs diff --git a/iroh-bytes/src/downloader/test.rs b/iroh-blobs/src/downloader/test.rs similarity index 100% rename from iroh-bytes/src/downloader/test.rs rename to iroh-blobs/src/downloader/test.rs diff --git a/iroh-bytes/src/downloader/test/dialer.rs b/iroh-blobs/src/downloader/test/dialer.rs similarity index 100% rename from iroh-bytes/src/downloader/test/dialer.rs rename to iroh-blobs/src/downloader/test/dialer.rs diff --git a/iroh-bytes/src/downloader/test/getter.rs b/iroh-blobs/src/downloader/test/getter.rs similarity index 100% rename from iroh-bytes/src/downloader/test/getter.rs rename to iroh-blobs/src/downloader/test/getter.rs diff --git a/iroh-bytes/src/export.rs b/iroh-blobs/src/export.rs similarity index 100% rename from iroh-bytes/src/export.rs rename to iroh-blobs/src/export.rs diff --git a/iroh-bytes/src/format.rs b/iroh-blobs/src/format.rs similarity index 100% rename from iroh-bytes/src/format.rs rename to iroh-blobs/src/format.rs diff --git a/iroh-bytes/src/format/collection.rs b/iroh-blobs/src/format/collection.rs similarity index 100% rename from iroh-bytes/src/format/collection.rs rename to iroh-blobs/src/format/collection.rs diff --git a/iroh-bytes/src/get.rs b/iroh-blobs/src/get.rs similarity index 100% rename from iroh-bytes/src/get.rs rename to iroh-blobs/src/get.rs diff --git a/iroh-bytes/src/get/db.rs b/iroh-blobs/src/get/db.rs similarity index 99% rename from iroh-bytes/src/get/db.rs rename to iroh-blobs/src/get/db.rs index 2efa03b6b8..85db7d08bd 100644 --- a/iroh-bytes/src/get/db.rs +++ b/iroh-blobs/src/get/db.rs @@ -1,4 +1,4 @@ -//! Functions that use the iroh-bytes protocol in conjunction with a bao store. +//! 
Functions that use the iroh-blobs protocol in conjunction with a bao store. use std::future::Future; use std::io; diff --git a/iroh-bytes/src/get/error.rs b/iroh-blobs/src/get/error.rs similarity index 100% rename from iroh-bytes/src/get/error.rs rename to iroh-blobs/src/get/error.rs diff --git a/iroh-bytes/src/get/progress.rs b/iroh-blobs/src/get/progress.rs similarity index 100% rename from iroh-bytes/src/get/progress.rs rename to iroh-blobs/src/get/progress.rs diff --git a/iroh-bytes/src/get/request.rs b/iroh-blobs/src/get/request.rs similarity index 100% rename from iroh-bytes/src/get/request.rs rename to iroh-blobs/src/get/request.rs diff --git a/iroh-bytes/src/hashseq.rs b/iroh-blobs/src/hashseq.rs similarity index 100% rename from iroh-bytes/src/hashseq.rs rename to iroh-blobs/src/hashseq.rs diff --git a/iroh-bytes/src/lib.rs b/iroh-blobs/src/lib.rs similarity index 100% rename from iroh-bytes/src/lib.rs rename to iroh-blobs/src/lib.rs diff --git a/iroh-bytes/src/metrics.rs b/iroh-blobs/src/metrics.rs similarity index 95% rename from iroh-bytes/src/metrics.rs rename to iroh-blobs/src/metrics.rs index 638fd53f87..cdb6d66033 100644 --- a/iroh-bytes/src/metrics.rs +++ b/iroh-blobs/src/metrics.rs @@ -1,4 +1,4 @@ -//! Metrics for iroh-bytes +//! Metrics for iroh-blobs use iroh_metrics::{ core::{Counter, Metric}, @@ -30,6 +30,6 @@ impl Default for Metrics { impl Metric for Metrics { fn name() -> &'static str { - "iroh-bytes" + "iroh-blobs" } } diff --git a/iroh-bytes/src/protocol.rs b/iroh-blobs/src/protocol.rs similarity index 95% rename from iroh-bytes/src/protocol.rs rename to iroh-blobs/src/protocol.rs index 78c7bfa396..82b90b085a 100644 --- a/iroh-bytes/src/protocol.rs +++ b/iroh-blobs/src/protocol.rs @@ -114,8 +114,8 @@ //! [`GetRequest::single`] that only requires the hash of the blob. //! //! ```rust -//! # use iroh_bytes::protocol::GetRequest; -//! # let hash: iroh_bytes::Hash = [0; 32].into(); +//! # use iroh_blobs::protocol::GetRequest; +//! 
# let hash: iroh_blobs::Hash = [0; 32].into(); //! let request = GetRequest::single(hash); //! ``` //! @@ -133,8 +133,8 @@ //! //! ```rust //! # use bao_tree::{ChunkNum, ChunkRanges}; -//! # use iroh_bytes::protocol::{GetRequest, RangeSpecSeq}; -//! # let hash: iroh_bytes::Hash = [0; 32].into(); +//! # use iroh_blobs::protocol::{GetRequest, RangeSpecSeq}; +//! # let hash: iroh_blobs::Hash = [0; 32].into(); //! let spec = RangeSpecSeq::from_ranges([ChunkRanges::from(..ChunkNum(10))]); //! let request = GetRequest::new(hash, spec); //! ``` @@ -148,8 +148,8 @@ //! //! ```rust //! # use bao_tree::{ChunkNum, ChunkRanges}; -//! # use iroh_bytes::protocol::{GetRequest, RangeSpecSeq}; -//! # let hash: iroh_bytes::Hash = [0; 32].into(); +//! # use iroh_blobs::protocol::{GetRequest, RangeSpecSeq}; +//! # let hash: iroh_blobs::Hash = [0; 32].into(); //! let ranges = &ChunkRanges::from(..ChunkNum(10)) | &ChunkRanges::from(ChunkNum(100)..ChunkNum(110)); //! let spec = RangeSpecSeq::from_ranges([ranges]); //! let request = GetRequest::new(hash, spec); @@ -188,8 +188,8 @@ //! //! ```rust //! # use bao_tree::{ChunkNum, ChunkRanges}; -//! # use iroh_bytes::protocol::{GetRequest, RangeSpecSeq}; -//! # let hash: iroh_bytes::Hash = [0; 32].into(); +//! # use iroh_blobs::protocol::{GetRequest, RangeSpecSeq}; +//! # let hash: iroh_blobs::Hash = [0; 32].into(); //! let spec = RangeSpecSeq::all(); //! let request = GetRequest::new(hash, spec); //! ``` @@ -213,8 +213,8 @@ //! //! ```rust //! # use bao_tree::{ChunkNum, ChunkRanges}; -//! # use iroh_bytes::protocol::{GetRequest, RangeSpecSeq}; -//! # let hash: iroh_bytes::Hash = [0; 32].into(); +//! # use iroh_blobs::protocol::{GetRequest, RangeSpecSeq}; +//! # let hash: iroh_blobs::Hash = [0; 32].into(); //! let spec = RangeSpecSeq::from_ranges([ //! ChunkRanges::empty(), // we don't need the collection itself //! ChunkRanges::empty(), // we don't need the first child either @@ -235,8 +235,8 @@ //! //! ```rust //! 
# use bao_tree::{ChunkNum, ChunkRanges}; -//! # use iroh_bytes::protocol::{GetRequest, RangeSpecSeq}; -//! # let hash: iroh_bytes::Hash = [0; 32].into(); +//! # use iroh_blobs::protocol::{GetRequest, RangeSpecSeq}; +//! # let hash: iroh_blobs::Hash = [0; 32].into(); //! let spec = RangeSpecSeq::from_ranges_infinite([ //! ChunkRanges::all(), // the collection itself //! ChunkRanges::from(..ChunkNum(1)), // the first chunk of each child @@ -251,8 +251,8 @@ //! //! ```rust //! # use bao_tree::{ChunkNum, ChunkRanges}; -//! # use iroh_bytes::protocol::{GetRequest, RangeSpecSeq}; -//! # let hash: iroh_bytes::Hash = [0; 32].into(); +//! # use iroh_blobs::protocol::{GetRequest, RangeSpecSeq}; +//! # let hash: iroh_blobs::Hash = [0; 32].into(); //! let spec = RangeSpecSeq::from_ranges([ //! ChunkRanges::empty(), // we don't need the collection itself //! ChunkRanges::empty(), // we don't need the first child either @@ -266,8 +266,8 @@ //! //! ```rust //! # use bao_tree::{ChunkNum, ChunkRanges}; -//! # use iroh_bytes::protocol::{GetRequest, RangeSpecSeq}; -//! # let child_hash: iroh_bytes::Hash = [0; 32].into(); +//! # use iroh_blobs::protocol::{GetRequest, RangeSpecSeq}; +//! # let child_hash: iroh_blobs::Hash = [0; 32].into(); //! let request = GetRequest::single(child_hash); //! ``` //! 
diff --git a/iroh-bytes/src/protocol/range_spec.rs b/iroh-blobs/src/protocol/range_spec.rs similarity index 100% rename from iroh-bytes/src/protocol/range_spec.rs rename to iroh-blobs/src/protocol/range_spec.rs diff --git a/iroh-bytes/src/provider.rs b/iroh-blobs/src/provider.rs similarity index 100% rename from iroh-bytes/src/provider.rs rename to iroh-blobs/src/provider.rs diff --git a/iroh-bytes/src/store.rs b/iroh-blobs/src/store.rs similarity index 100% rename from iroh-bytes/src/store.rs rename to iroh-blobs/src/store.rs diff --git a/iroh-bytes/src/store/bao_file.rs b/iroh-blobs/src/store/bao_file.rs similarity index 100% rename from iroh-bytes/src/store/bao_file.rs rename to iroh-blobs/src/store/bao_file.rs diff --git a/iroh-bytes/src/store/fs.rs b/iroh-blobs/src/store/fs.rs similarity index 100% rename from iroh-bytes/src/store/fs.rs rename to iroh-blobs/src/store/fs.rs diff --git a/iroh-bytes/src/store/fs/import_flat_store.rs b/iroh-blobs/src/store/fs/import_flat_store.rs similarity index 100% rename from iroh-bytes/src/store/fs/import_flat_store.rs rename to iroh-blobs/src/store/fs/import_flat_store.rs diff --git a/iroh-bytes/src/store/fs/migrate_redb_v1_v2.rs b/iroh-blobs/src/store/fs/migrate_redb_v1_v2.rs similarity index 100% rename from iroh-bytes/src/store/fs/migrate_redb_v1_v2.rs rename to iroh-blobs/src/store/fs/migrate_redb_v1_v2.rs diff --git a/iroh-bytes/src/store/fs/tables.rs b/iroh-blobs/src/store/fs/tables.rs similarity index 100% rename from iroh-bytes/src/store/fs/tables.rs rename to iroh-blobs/src/store/fs/tables.rs diff --git a/iroh-bytes/src/store/fs/test_support.rs b/iroh-blobs/src/store/fs/test_support.rs similarity index 100% rename from iroh-bytes/src/store/fs/test_support.rs rename to iroh-blobs/src/store/fs/test_support.rs diff --git a/iroh-bytes/src/store/fs/tests.rs b/iroh-blobs/src/store/fs/tests.rs similarity index 100% rename from iroh-bytes/src/store/fs/tests.rs rename to iroh-blobs/src/store/fs/tests.rs diff --git 
a/iroh-bytes/src/store/fs/util.rs b/iroh-blobs/src/store/fs/util.rs similarity index 100% rename from iroh-bytes/src/store/fs/util.rs rename to iroh-blobs/src/store/fs/util.rs diff --git a/iroh-bytes/src/store/fs/validate.rs b/iroh-blobs/src/store/fs/validate.rs similarity index 100% rename from iroh-bytes/src/store/fs/validate.rs rename to iroh-blobs/src/store/fs/validate.rs diff --git a/iroh-bytes/src/store/mem.rs b/iroh-blobs/src/store/mem.rs similarity index 99% rename from iroh-bytes/src/store/mem.rs rename to iroh-blobs/src/store/mem.rs index 47f75e9fd2..7b14b2a14b 100644 --- a/iroh-bytes/src/store/mem.rs +++ b/iroh-blobs/src/store/mem.rs @@ -1,4 +1,4 @@ -//! A full in memory database for iroh-bytes +//! A full in memory database for iroh-blobs //! //! Main entry point is [Store]. use bao_tree::{ @@ -33,7 +33,7 @@ use super::{ ImportProgress, Map, TempCounterMap, }; -/// A fully featured in memory database for iroh-bytes, including support for +/// A fully featured in memory database for iroh-blobs, including support for /// partial blobs. #[derive(Debug, Clone, Default)] pub struct Store { diff --git a/iroh-bytes/src/store/mutable_mem_storage.rs b/iroh-blobs/src/store/mutable_mem_storage.rs similarity index 100% rename from iroh-bytes/src/store/mutable_mem_storage.rs rename to iroh-blobs/src/store/mutable_mem_storage.rs diff --git a/iroh-bytes/src/store/readonly_mem.rs b/iroh-blobs/src/store/readonly_mem.rs similarity index 98% rename from iroh-bytes/src/store/readonly_mem.rs rename to iroh-blobs/src/store/readonly_mem.rs index d2385aa7a2..4b77698313 100644 --- a/iroh-bytes/src/store/readonly_mem.rs +++ b/iroh-blobs/src/store/readonly_mem.rs @@ -1,4 +1,4 @@ -//! A readonly in memory database for iroh-bytes, usable for testing and sharing static data. +//! A readonly in memory database for iroh-blobs, usable for testing and sharing static data. //! //! Main entry point is [Store]. 
use std::{ @@ -30,7 +30,7 @@ use tokio::io::AsyncWriteExt; use super::{BaoBatchWriter, BaoBlobSize, ConsistencyCheckProgress, DbIter, ExportProgressCb}; -/// A readonly in memory database for iroh-bytes. +/// A readonly in memory database for iroh-blobs. /// /// This is basically just a HashMap, so it does not allow for any modifications /// unless you have a mutable reference to it. diff --git a/iroh-bytes/src/store/traits.rs b/iroh-blobs/src/store/traits.rs similarity index 100% rename from iroh-bytes/src/store/traits.rs rename to iroh-blobs/src/store/traits.rs diff --git a/iroh-bytes/src/util.rs b/iroh-blobs/src/util.rs similarity index 100% rename from iroh-bytes/src/util.rs rename to iroh-blobs/src/util.rs diff --git a/iroh-bytes/src/util/io.rs b/iroh-blobs/src/util/io.rs similarity index 100% rename from iroh-bytes/src/util/io.rs rename to iroh-blobs/src/util/io.rs diff --git a/iroh-bytes/src/util/mem_or_file.rs b/iroh-blobs/src/util/mem_or_file.rs similarity index 100% rename from iroh-bytes/src/util/mem_or_file.rs rename to iroh-blobs/src/util/mem_or_file.rs diff --git a/iroh-bytes/src/util/progress.rs b/iroh-blobs/src/util/progress.rs similarity index 100% rename from iroh-bytes/src/util/progress.rs rename to iroh-blobs/src/util/progress.rs diff --git a/iroh-bytes/src/util/sparse_mem_file.rs b/iroh-blobs/src/util/sparse_mem_file.rs similarity index 100% rename from iroh-bytes/src/util/sparse_mem_file.rs rename to iroh-blobs/src/util/sparse_mem_file.rs diff --git a/iroh-cli/src/commands/author.rs b/iroh-cli/src/commands/author.rs index eed5e8bab0..1499a523c7 100644 --- a/iroh-cli/src/commands/author.rs +++ b/iroh-cli/src/commands/author.rs @@ -5,7 +5,7 @@ use futures_lite::StreamExt; use iroh::base::base32::fmt_short; use iroh::client::{Iroh, RpcService}; -use iroh::sync::{Author, AuthorId}; +use iroh::docs::{Author, AuthorId}; use quic_rpc::ServiceConnection; use crate::config::ConsoleEnv; diff --git a/iroh-cli/src/commands/blob.rs 
b/iroh-cli/src/commands/blob.rs index ed26789877..2a0aeeab6a 100644 --- a/iroh-cli/src/commands/blob.rs +++ b/iroh-cli/src/commands/blob.rs @@ -16,7 +16,7 @@ use indicatif::{ use iroh::{ base::node_addr::AddrInfoOptions, base::ticket::BlobTicket, - bytes::{ + blobs::{ get::{db::DownloadProgress, progress::BlobProgress, Stats}, provider::AddProgress, store::{ diff --git a/iroh-cli/src/commands/doc.rs b/iroh-cli/src/commands/doc.rs index fcdb3af5c0..9b27fc6aca 100644 --- a/iroh-cli/src/commands/doc.rs +++ b/iroh-cli/src/commands/doc.rs @@ -18,13 +18,13 @@ use tokio::io::AsyncReadExt; use iroh::{ base::{base32::fmt_short, node_addr::AddrInfoOptions}, - bytes::{provider::AddProgress, util::SetTagOption, Hash, Tag}, + blobs::{provider::AddProgress, util::SetTagOption, Hash, Tag}, client::{ blobs::WrapOption, docs::{Doc, Entry, LiveEvent, Origin, ShareMode}, Iroh, RpcService, }, - sync::{ + docs::{ store::{DownloadPolicy, FilterKind, Query, SortDirection}, AuthorId, DocTicket, NamespaceId, }, @@ -293,7 +293,7 @@ pub enum Sorting { /// Sort by key, then author Key, } -impl From<Sorting> for iroh::sync::store::SortBy { +impl From<Sorting> for iroh::docs::store::SortBy { fn from(value: Sorting) -> Self { match value { Sorting::Author => Self::AuthorKey, @@ -558,16 +558,16 @@ impl DocCommands { content_status, } => { let content = match content_status { - iroh::sync::ContentStatus::Complete => { + iroh::docs::ContentStatus::Complete => { fmt_entry(&doc, &entry, DisplayContentMode::Auto).await } - iroh::sync::ContentStatus::Incomplete => { + iroh::docs::ContentStatus::Incomplete => { let (Ok(content) | Err(content)) = fmt_content(&doc, &entry, DisplayContentMode::ShortHash) .await; format!("<incomplete: {} ({})>", content, human_len(&entry)) } - iroh::sync::ContentStatus::Missing => { + iroh::docs::ContentStatus::Missing => { let (Ok(content) | Err(content)) = fmt_content(&doc, &entry, DisplayContentMode::ShortHash) .await; diff --git a/iroh-cli/src/commands/doctor.rs b/iroh-cli/src/commands/doctor.rs index 
93c491186f..06ee1ba504 100644 --- a/iroh-cli/src/commands/doctor.rs +++ b/iroh-cli/src/commands/doctor.rs @@ -20,10 +20,11 @@ use futures_lite::StreamExt; use indicatif::{HumanBytes, MultiProgress, ProgressBar}; use iroh::{ base::ticket::{BlobTicket, Ticket}, - bytes::{ + blobs::{ store::{ReadableStore, Store as _}, util::progress::{FlumeProgressSender, ProgressSender}, }, + docs::{Capability, DocTicket}, net::{ defaults::DEFAULT_RELAY_STUN_PORT, discovery::{ @@ -37,7 +38,6 @@ use iroh::{ util::AbortingJoinHandle, MagicEndpoint, NodeAddr, NodeId, }, - sync::{Capability, DocTicket}, util::{path::IrohPaths, progress::ProgressWriter}, }; use portable_atomic::AtomicU64; @@ -191,7 +191,7 @@ pub enum Commands { BlobConsistencyCheck { /// Path of the blob store to validate. For iroh, this is the blobs subdirectory /// in the iroh data directory. But this can also be used for apps that embed - /// just iroh-bytes. + /// just iroh-blobs. path: PathBuf, /// Try to get the store into a consistent state by removing orphaned data /// and broken entries. @@ -204,7 +204,7 @@ pub enum Commands { BlobValidate { /// Path of the blob store to validate. For iroh, this is the blobs subdirectory /// in the iroh data directory. But this can also be used for apps that embed - /// just iroh-bytes. + /// just iroh-blobs. path: PathBuf, /// Try to get the store into a consistent state by downgrading entries from /// complete to partial if data is missing etc. 
@@ -1098,7 +1098,7 @@ pub async fn run(command: Commands, config: &NodeConfig) -> anyhow::Result<()> { } Commands::TicketInspect { ticket, zbase32 } => inspect_ticket(&ticket, zbase32), Commands::BlobConsistencyCheck { path, repair } => { - let blob_store = iroh::bytes::store::fs::Store::load(path).await?; + let blob_store = iroh::blobs::store::fs::Store::load(path).await?; let (send, recv) = flume::bounded(1); let task = tokio::spawn(async move { while let Ok(msg) = recv.recv_async().await { @@ -1112,7 +1112,7 @@ pub async fn run(command: Commands, config: &NodeConfig) -> anyhow::Result<()> { Ok(()) } Commands::BlobValidate { path, repair } => { - let blob_store = iroh::bytes::store::fs::Store::load(path).await?; + let blob_store = iroh::blobs::store::fs::Store::load(path).await?; let (send, recv) = flume::bounded(1); let task = tokio::spawn(async move { while let Ok(msg) = recv.recv_async().await { diff --git a/iroh-cli/src/commands/start.rs b/iroh-cli/src/commands/start.rs index 19473ae7b0..ec22c26de8 100644 --- a/iroh-cli/src/commands/start.rs +++ b/iroh-cli/src/commands/start.rs @@ -116,7 +116,7 @@ where pub(crate) async fn start_node( iroh_data_root: &Path, relay_map: Option, -) -> Result> { +) -> Result> { let rpc_status = RpcStatus::load(iroh_data_root).await?; match rpc_status { RpcStatus::Running { port, .. 
} => { @@ -141,7 +141,7 @@ pub(crate) async fn start_node( .await } -fn welcome_message(node: &Node) -> Result { +fn welcome_message(node: &Node) -> Result { let msg = format!( "{}\nNode ID: {}\n", "Iroh is running".green(), diff --git a/iroh-cli/src/commands/tag.rs b/iroh-cli/src/commands/tag.rs index 0992d9abad..3d995d5a52 100644 --- a/iroh-cli/src/commands/tag.rs +++ b/iroh-cli/src/commands/tag.rs @@ -2,7 +2,7 @@ use anyhow::Result; use bytes::Bytes; use clap::Subcommand; use futures_lite::StreamExt; -use iroh::bytes::Tag; +use iroh::blobs::Tag; use iroh::client::{Iroh, RpcService}; use quic_rpc::ServiceConnection; diff --git a/iroh-cli/src/config.rs b/iroh-cli/src/config.rs index 9a323b6c50..a2bc03fc1d 100644 --- a/iroh-cli/src/config.rs +++ b/iroh-cli/src/config.rs @@ -9,12 +9,12 @@ use std::{ }; use anyhow::{anyhow, bail, Context, Result}; +use iroh::docs::{AuthorId, NamespaceId}; use iroh::net::{ defaults::{default_eu_relay_node, default_na_relay_node}, relay::{RelayMap, RelayNode}, }; use iroh::node::GcPolicy; -use iroh::sync::{AuthorId, NamespaceId}; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; diff --git a/iroh-cli/tests/cli.rs b/iroh-cli/tests/cli.rs index e08e349abc..ae6f2993a0 100644 --- a/iroh-cli/tests/cli.rs +++ b/iroh-cli/tests/cli.rs @@ -12,7 +12,7 @@ use bao_tree::blake3; use duct::{cmd, ReaderHandle}; use iroh::{ base::ticket::BlobTicket, - bytes::{Hash, HashAndFormat}, + blobs::{Hash, HashAndFormat}, util::path::IrohPaths, }; use rand::distributions::{Alphanumeric, DistString}; @@ -112,7 +112,7 @@ fn cli_provide_tree() -> Result<()> { #[test] #[ignore = "flaky"] fn cli_provide_tree_resume() -> Result<()> { - use iroh_bytes::store::file::test_support::{make_partial, MakePartialResult}; + use iroh_blobs::store::file::test_support::{make_partial, MakePartialResult}; /// Get all matches for match group 1 (an explicitly defined match group) fn explicit_matches(matches: Vec<(usize, Vec)>) -> Vec { @@ -218,7 +218,7 @@ fn 
cli_provide_tree_resume() -> Result<()> { #[test] #[ignore = "flaky"] fn cli_provide_file_resume() -> Result<()> { - use iroh_bytes::store::file::test_support::{make_partial, MakePartialResult}; + use iroh_blobs::store::file::test_support::{make_partial, MakePartialResult}; /// Get all matches for match group 1 (an explicitly defined match group) fn explicit_matches(matches: Vec<(usize, Vec)>) -> Vec { @@ -391,7 +391,7 @@ fn cli_bao_store_migration() -> anyhow::Result<()> { #[tokio::test] #[ignore = "flaky"] async fn cli_provide_persistence() -> anyhow::Result<()> { - use iroh::bytes::store::ReadableStore; + use iroh::blobs::store::ReadableStore; use nix::{ sys::signal::{self, Signal}, unistd::Pid, @@ -447,14 +447,14 @@ async fn cli_provide_persistence() -> anyhow::Result<()> { provide(&foo_path)?; // should have some data now let db_path = IrohPaths::BaoStoreDir.with_root(&iroh_data_dir); - let db = iroh::bytes::store::fs::Store::load(&db_path).await?; + let db = iroh::blobs::store::fs::Store::load(&db_path).await?; let blobs: Vec> = db.blobs().await.unwrap().collect::>(); drop(db); assert_eq!(blobs.len(), 3); provide(&bar_path)?; // should have more data now - let db = iroh::bytes::store::fs::Store::load(&db_path).await?; + let db = iroh::blobs::store::fs::Store::load(&db_path).await?; let blobs = db.blobs().await.unwrap().collect::>(); drop(db); assert_eq!(blobs.len(), 6); diff --git a/iroh-sync/Cargo.toml b/iroh-docs/Cargo.toml similarity index 99% rename from iroh-sync/Cargo.toml rename to iroh-docs/Cargo.toml index 01279cf3e2..aa9b9839f8 100644 --- a/iroh-sync/Cargo.toml +++ b/iroh-docs/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "iroh-sync" +name = "iroh-docs" version = "0.15.0" edition = "2021" readme = "README.md" diff --git a/iroh-sync/LICENSE-APACHE b/iroh-docs/LICENSE-APACHE similarity index 100% rename from iroh-sync/LICENSE-APACHE rename to iroh-docs/LICENSE-APACHE diff --git a/iroh-sync/LICENSE-MIT b/iroh-docs/LICENSE-MIT similarity index 100% 
rename from iroh-sync/LICENSE-MIT rename to iroh-docs/LICENSE-MIT diff --git a/iroh-sync/README.md b/iroh-docs/README.md similarity index 99% rename from iroh-sync/README.md rename to iroh-docs/README.md index 4e63a49f10..8867860605 100644 --- a/iroh-sync/README.md +++ b/iroh-docs/README.md @@ -1,4 +1,4 @@ -# iroh-sync +# iroh-docs Multi-dimensional key-value documents with an efficient synchronization protocol. diff --git a/iroh-sync/proptest-regressions/ranger.txt b/iroh-docs/proptest-regressions/ranger.txt similarity index 100% rename from iroh-sync/proptest-regressions/ranger.txt rename to iroh-docs/proptest-regressions/ranger.txt diff --git a/iroh-sync/src/actor.rs b/iroh-docs/src/actor.rs similarity index 99% rename from iroh-sync/src/actor.rs rename to iroh-docs/src/actor.rs index 7a4f7c9960..bbe91181cb 100644 --- a/iroh-sync/src/actor.rs +++ b/iroh-docs/src/actor.rs @@ -543,7 +543,7 @@ impl SyncHandle { self.tx .send_async(action) .await - .context("sending to iroh_sync actor failed")?; + .context("sending to iroh_docs actor failed")?; Ok(()) } async fn send_replica(&self, namespace: NamespaceId, action: ReplicaAction) -> Result<()> { diff --git a/iroh-sync/src/heads.rs b/iroh-docs/src/heads.rs similarity index 100% rename from iroh-sync/src/heads.rs rename to iroh-docs/src/heads.rs diff --git a/iroh-sync/src/keys.rs b/iroh-docs/src/keys.rs similarity index 99% rename from iroh-sync/src/keys.rs rename to iroh-docs/src/keys.rs index 622f28ede5..9efbeee560 100644 --- a/iroh-sync/src/keys.rs +++ b/iroh-docs/src/keys.rs @@ -1,4 +1,4 @@ -//! Keys used in iroh-sync +//! 
Keys used in iroh-docs use std::{cmp::Ordering, fmt, str::FromStr}; diff --git a/iroh-sync/src/lib.rs b/iroh-docs/src/lib.rs similarity index 100% rename from iroh-sync/src/lib.rs rename to iroh-docs/src/lib.rs diff --git a/iroh-sync/src/metrics.rs b/iroh-docs/src/metrics.rs similarity index 95% rename from iroh-sync/src/metrics.rs rename to iroh-docs/src/metrics.rs index f7c6c1b9f8..90d562495c 100644 --- a/iroh-sync/src/metrics.rs +++ b/iroh-docs/src/metrics.rs @@ -1,11 +1,11 @@ -//! Metrics for iroh-sync +//! Metrics for iroh-docs use iroh_metrics::{ core::{Counter, Metric}, struct_iterable::Iterable, }; -/// Metrics for iroh-sync +/// Metrics for iroh-docs #[allow(missing_docs)] #[derive(Debug, Clone, Iterable)] pub struct Metrics { @@ -36,6 +36,6 @@ impl Default for Metrics { impl Metric for Metrics { fn name() -> &'static str { - "iroh_sync" + "iroh_docs" } } diff --git a/iroh-sync/src/net.rs b/iroh-docs/src/net.rs similarity index 97% rename from iroh-sync/src/net.rs rename to iroh-docs/src/net.rs index 30bcfe8c82..aff9046548 100644 --- a/iroh-sync/src/net.rs +++ b/iroh-docs/src/net.rs @@ -1,4 +1,4 @@ -//! Network implementation of the iroh-sync protocol +//! Network implementation of the iroh-docs protocol use std::{ future::Future, @@ -20,8 +20,8 @@ use crate::metrics::Metrics; #[cfg(feature = "metrics")] use iroh_metrics::inc; -/// The ALPN identifier for the iroh-sync protocol -pub const SYNC_ALPN: &[u8] = b"/iroh-sync/1"; +/// The ALPN identifier for the iroh-docs protocol +pub const DOCS_ALPN: &[u8] = b"/iroh-sync/1"; mod codec; @@ -36,7 +36,7 @@ pub async fn connect_and_sync( let peer_id = peer.node_id; trace!("connect"); let connection = endpoint - .connect(peer, SYNC_ALPN) + .connect(peer, DOCS_ALPN) .await .map_err(ConnectError::connect)?; @@ -103,7 +103,7 @@ pub enum AcceptOutcome { Reject(AbortReason), } -/// Handle an iroh-sync connection and sync all shared documents in the replica store. 
+/// Handle an iroh-docs connection and sync all shared documents in the replica store. pub async fn handle_connection( sync: SyncHandle, connecting: quinn::Connecting, diff --git a/iroh-sync/src/net/codec.rs b/iroh-docs/src/net/codec.rs similarity index 100% rename from iroh-sync/src/net/codec.rs rename to iroh-docs/src/net/codec.rs diff --git a/iroh-sync/src/ranger.rs b/iroh-docs/src/ranger.rs similarity index 100% rename from iroh-sync/src/ranger.rs rename to iroh-docs/src/ranger.rs diff --git a/iroh-sync/src/store.rs b/iroh-docs/src/store.rs similarity index 99% rename from iroh-sync/src/store.rs rename to iroh-docs/src/store.rs index 488d45657e..3f137be371 100644 --- a/iroh-sync/src/store.rs +++ b/iroh-docs/src/store.rs @@ -1,4 +1,4 @@ -//! Storage trait and implementation for iroh-sync documents +//! Storage trait and implementation for iroh-docs documents use std::num::NonZeroUsize; use anyhow::Result; diff --git a/iroh-sync/src/store/fs.rs b/iroh-docs/src/store/fs.rs similarity index 100% rename from iroh-sync/src/store/fs.rs rename to iroh-docs/src/store/fs.rs diff --git a/iroh-sync/src/store/fs/bounds.rs b/iroh-docs/src/store/fs/bounds.rs similarity index 100% rename from iroh-sync/src/store/fs/bounds.rs rename to iroh-docs/src/store/fs/bounds.rs diff --git a/iroh-sync/src/store/fs/migrate_v1_v2.rs b/iroh-docs/src/store/fs/migrate_v1_v2.rs similarity index 100% rename from iroh-sync/src/store/fs/migrate_v1_v2.rs rename to iroh-docs/src/store/fs/migrate_v1_v2.rs diff --git a/iroh-sync/src/store/fs/migrations.rs b/iroh-docs/src/store/fs/migrations.rs similarity index 100% rename from iroh-sync/src/store/fs/migrations.rs rename to iroh-docs/src/store/fs/migrations.rs diff --git a/iroh-sync/src/store/fs/query.rs b/iroh-docs/src/store/fs/query.rs similarity index 100% rename from iroh-sync/src/store/fs/query.rs rename to iroh-docs/src/store/fs/query.rs diff --git a/iroh-sync/src/store/fs/ranges.rs b/iroh-docs/src/store/fs/ranges.rs similarity index 100% rename 
from iroh-sync/src/store/fs/ranges.rs rename to iroh-docs/src/store/fs/ranges.rs diff --git a/iroh-sync/src/store/fs/tables.rs b/iroh-docs/src/store/fs/tables.rs similarity index 100% rename from iroh-sync/src/store/fs/tables.rs rename to iroh-docs/src/store/fs/tables.rs diff --git a/iroh-sync/src/store/pubkeys.rs b/iroh-docs/src/store/pubkeys.rs similarity index 100% rename from iroh-sync/src/store/pubkeys.rs rename to iroh-docs/src/store/pubkeys.rs diff --git a/iroh-sync/src/store/util.rs b/iroh-docs/src/store/util.rs similarity index 100% rename from iroh-sync/src/store/util.rs rename to iroh-docs/src/store/util.rs diff --git a/iroh-sync/src/sync.rs b/iroh-docs/src/sync.rs similarity index 99% rename from iroh-sync/src/sync.rs rename to iroh-docs/src/sync.rs index c2cd2e0ed6..5d3896f4bc 100644 --- a/iroh-sync/src/sync.rs +++ b/iroh-docs/src/sync.rs @@ -1,4 +1,4 @@ -//! API for iroh-sync replicas +//! API for iroh-docs replicas // Names and concepts are roughly based on Willows design at the moment: // @@ -37,7 +37,7 @@ use crate::{ pub type ProtocolMessage = crate::ranger::Message; /// Byte representation of a `PeerId` from `iroh-net`. -// TODO: PeerId is in iroh-net which iroh-sync doesn't depend on. Add iroh-base crate with `PeerId`. +// TODO: PeerId is in iroh-net which iroh-docs doesn't depend on. Add iroh-base crate with `PeerId`. pub type PeerIdBytes = [u8; 32]; /// Max time in the future from our wall clock time that we accept entries for. diff --git a/iroh-sync/src/ticket.rs b/iroh-docs/src/ticket.rs similarity index 98% rename from iroh-sync/src/ticket.rs rename to iroh-docs/src/ticket.rs index b9cdad2c7c..b54c2e4592 100644 --- a/iroh-sync/src/ticket.rs +++ b/iroh-docs/src/ticket.rs @@ -1,4 +1,4 @@ -//! Tickets for [`iroh-sync`] documents. +//! Tickets for [`iroh-docs`] documents. 
use iroh_base::ticket; use iroh_net::NodeAddr; diff --git a/iroh-gossip/Cargo.toml b/iroh-gossip/Cargo.toml index 8cb0ef47f3..97d55de4a8 100644 --- a/iroh-gossip/Cargo.toml +++ b/iroh-gossip/Cargo.toml @@ -6,7 +6,7 @@ readme = "README.md" description = "gossip messages over broadcast trees" license = "MIT/Apache-2.0" authors = ["n0 team"] -repository = "https://github.com/n0-computer/iroh-sync" +repository = "https://github.com/n0-computer/iroh" # Sadly this also needs to be updated in .github/workflows/ci.yml rust-version = "1.75" diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 03c0f71156..22120e707f 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -26,14 +26,14 @@ futures-lite = "2.3" futures-util = "0.3" genawaiter = { version = "0.99", default-features = false, features = ["futures03"] } hex = { version = "0.4.3" } -iroh-bytes = { version = "0.15.0", path = "../iroh-bytes", features = ["downloader"] } +iroh-blobs = { version = "0.15.0", path = "../iroh-blobs", features = ["downloader"] } iroh-base = { version = "0.15.0", path = "../iroh-base", features = ["key"] } iroh-io = { version = "0.6.0", features = ["stats"] } iroh-metrics = { version = "0.15.0", path = "../iroh-metrics", optional = true } iroh-net = { version = "0.15.0", path = "../iroh-net" } num_cpus = { version = "1.15.0" } portable-atomic = "1" -iroh-sync = { version = "0.15.0", path = "../iroh-sync" } +iroh-docs = { version = "0.15.0", path = "../iroh-docs" } iroh-gossip = { version = "0.15.0", path = "../iroh-gossip" } parking_lot = "0.12.1" postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } @@ -56,8 +56,8 @@ indicatif = { version = "0.17", features = ["tokio"], optional = true } [features] default = ["metrics", "fs-store"] -metrics = ["iroh-metrics", "iroh-bytes/metrics"] -fs-store = ["iroh-bytes/fs-store"] +metrics = ["iroh-metrics", "iroh-blobs/metrics"] +fs-store = ["iroh-blobs/fs-store"] test = [] examples = ["dep:clap", 
"dep:indicatif"] test-utils = ["iroh-net/test-utils"] diff --git a/iroh/examples/client.rs b/iroh/examples/client.rs index 0d488c42c2..ea8b5bce58 100644 --- a/iroh/examples/client.rs +++ b/iroh/examples/client.rs @@ -6,7 +6,7 @@ //! run this example from the project root: //! $ cargo run --example client use indicatif::HumanBytes; -use iroh::{base::base32, client::docs::Entry, node::Node, sync::store::Query}; +use iroh::{base::base32, client::docs::Entry, docs::store::Query, node::Node}; use tokio_stream::StreamExt; #[tokio::main] diff --git a/iroh/examples/collection-fetch.rs b/iroh/examples/collection-fetch.rs index 88a8598c47..e35f61ba95 100644 --- a/iroh/examples/collection-fetch.rs +++ b/iroh/examples/collection-fetch.rs @@ -6,7 +6,7 @@ use std::{env, str::FromStr}; use anyhow::{bail, ensure, Context, Result}; -use iroh::{base::ticket::BlobTicket, bytes::BlobFormat}; +use iroh::{base::ticket::BlobTicket, blobs::BlobFormat}; use tracing_subscriber::{prelude::*, EnvFilter}; // set the RUST_LOG env var to one of {debug,info,warn} to see logging info diff --git a/iroh/examples/collection-provide.rs b/iroh/examples/collection-provide.rs index 5f12a7b6c9..c6eb7f9fbf 100644 --- a/iroh/examples/collection-provide.rs +++ b/iroh/examples/collection-provide.rs @@ -6,7 +6,7 @@ //! This is using an in memory database and a random node id. //! run this example from the project root: //! 
$ cargo run --example collection-provide -use iroh::bytes::{format::collection::Collection, util::SetTagOption, BlobFormat}; +use iroh::blobs::{format::collection::Collection, util::SetTagOption, BlobFormat}; use tracing_subscriber::{prelude::*, EnvFilter}; // set the RUST_LOG env var to one of {debug,info,warn} to see logging info diff --git a/iroh/examples/hello-world-fetch.rs b/iroh/examples/hello-world-fetch.rs index 9470d39756..71672845a8 100644 --- a/iroh/examples/hello-world-fetch.rs +++ b/iroh/examples/hello-world-fetch.rs @@ -6,7 +6,7 @@ use std::{env, str::FromStr}; use anyhow::{bail, ensure, Context, Result}; -use iroh::{base::ticket::BlobTicket, bytes::BlobFormat}; +use iroh::{base::ticket::BlobTicket, blobs::BlobFormat}; use tracing_subscriber::{prelude::*, EnvFilter}; // set the RUST_LOG env var to one of {debug,info,warn} to see logging info diff --git a/iroh/examples/rpc.rs b/iroh/examples/rpc.rs index 1d1aa9550f..f3abdafc52 100644 --- a/iroh/examples/rpc.rs +++ b/iroh/examples/rpc.rs @@ -8,7 +8,7 @@ //! 
The `node stats` command will reach out over RPC to the node constructed in the example use clap::Parser; -use iroh_bytes::store::Store; +use iroh_blobs::store::Store; use tracing_subscriber::{prelude::*, EnvFilter}; // set the RUST_LOG env var to one of {debug,info,warn} to see logging info diff --git a/iroh/src/client/authors.rs b/iroh/src/client/authors.rs index 8d0b5b69a2..690ae228da 100644 --- a/iroh/src/client/authors.rs +++ b/iroh/src/client/authors.rs @@ -2,7 +2,7 @@ use anyhow::Result; use futures_lite::{stream::StreamExt, Stream}; -use iroh_sync::{Author, AuthorId}; +use iroh_docs::{Author, AuthorId}; use quic_rpc::{RpcClient, ServiceConnection}; use crate::rpc_protocol::{ diff --git a/iroh/src/client/blobs.rs b/iroh/src/client/blobs.rs index 9130a701e9..61d075e7fc 100644 --- a/iroh/src/client/blobs.rs +++ b/iroh/src/client/blobs.rs @@ -14,7 +14,7 @@ use bytes::Bytes; use futures_lite::{Stream, StreamExt}; use futures_util::SinkExt; use iroh_base::{node_addr::AddrInfoOptions, ticket::BlobTicket}; -use iroh_bytes::{ +use iroh_blobs::{ export::ExportProgress as BytesExportProgress, format::collection::Collection, get::db::DownloadProgress as BytesDownloadProgress, @@ -453,7 +453,7 @@ pub struct IncompleteBlobInfo { pub struct AddProgress { #[debug(skip)] stream: Pin< - Box> + Send + Unpin + 'static>, + Box> + Send + Unpin + 'static>, >, current_total_size: Arc, } @@ -461,7 +461,7 @@ pub struct AddProgress { impl AddProgress { fn new( stream: (impl Stream< - Item = Result, impl Into>, + Item = Result, impl Into>, > + Send + Unpin + 'static), @@ -471,7 +471,7 @@ impl AddProgress { let stream = stream.map(move |item| match item { Ok(item) => { let item = item.into(); - if let iroh_bytes::provider::AddProgress::Found { size, .. } = &item { + if let iroh_blobs::provider::AddProgress::Found { size, .. 
} = &item { total_size.fetch_add(*size, Ordering::Relaxed); } Ok(item) @@ -496,7 +496,7 @@ impl AddProgress { } impl Stream for AddProgress { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.stream).poll_next(cx) } @@ -514,7 +514,7 @@ impl Future for AddProgress { } Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)), Poll::Ready(Some(Ok(msg))) => match msg { - iroh_bytes::provider::AddProgress::AllDone { hash, format, tag } => { + iroh_blobs::provider::AddProgress::AllDone { hash, format, tag } => { let outcome = AddOutcome { hash, format, @@ -523,7 +523,7 @@ impl Future for AddProgress { }; return Poll::Ready(Ok(outcome)); } - iroh_bytes::provider::AddProgress::Abort(err) => { + iroh_blobs::provider::AddProgress::Abort(err) => { return Poll::Ready(Err(err.into())); } _ => {} @@ -541,7 +541,7 @@ pub struct DownloadOutcome { /// The size of the data we downloaded from the network pub downloaded_size: u64, /// Statistics about the download - pub stats: iroh_bytes::get::Stats, + pub stats: iroh_blobs::get::Stats, } /// Progress stream for blob download operations. 
diff --git a/iroh/src/client/docs.rs b/iroh/src/client/docs.rs index 9c4229325d..15ccfaa2fa 100644 --- a/iroh/src/client/docs.rs +++ b/iroh/src/client/docs.rs @@ -12,13 +12,13 @@ use bytes::Bytes; use derive_more::{Display, FromStr}; use futures_lite::{Stream, StreamExt}; use iroh_base::{key::PublicKey, node_addr::AddrInfoOptions}; -use iroh_bytes::{export::ExportProgress, store::ExportMode, Hash}; -use iroh_net::NodeAddr; -use iroh_sync::{ +use iroh_blobs::{export::ExportProgress, store::ExportMode, Hash}; +use iroh_docs::{ actor::OpenState, store::{DownloadPolicy, Query}, AuthorId, CapabilityKind, ContentStatus, DocTicket, NamespaceId, PeerIdBytes, RecordIdentifier, }; +use iroh_net::NodeAddr; use portable_atomic::{AtomicBool, Ordering}; use quic_rpc::{message::RpcMsg, RpcClient, ServiceConnection}; use serde::{Deserialize, Serialize}; @@ -33,7 +33,7 @@ use crate::rpc_protocol::{ }; #[doc(inline)] -pub use crate::sync_engine::{Origin, SyncEvent, SyncReason}; +pub use crate::docs_engine::{Origin, SyncEvent, SyncReason}; use super::{blobs, flatten}; @@ -395,16 +395,16 @@ impl<'a, C: ServiceConnection> From<&'a Doc> for &'a RpcClient for Entry { - fn from(value: iroh_sync::Entry) -> Self { +impl From for Entry { + fn from(value: iroh_docs::Entry) -> Self { Self(value) } } -impl From for Entry { - fn from(value: iroh_sync::SignedEntry) -> Self { +impl From for Entry { + fn from(value: iroh_docs::SignedEntry) -> Self { Self(value.into()) } } @@ -509,13 +509,13 @@ pub enum LiveEvent { SyncFinished(SyncEvent), } -impl From for LiveEvent { - fn from(event: crate::sync_engine::LiveEvent) -> LiveEvent { +impl From for LiveEvent { + fn from(event: crate::docs_engine::LiveEvent) -> LiveEvent { match event { - crate::sync_engine::LiveEvent::InsertLocal { entry } => Self::InsertLocal { + crate::docs_engine::LiveEvent::InsertLocal { entry } => Self::InsertLocal { entry: entry.into(), }, - crate::sync_engine::LiveEvent::InsertRemote { + 
crate::docs_engine::LiveEvent::InsertRemote { from, entry, content_status, @@ -524,10 +524,10 @@ impl From for LiveEvent { content_status, entry: entry.into(), }, - crate::sync_engine::LiveEvent::ContentReady { hash } => Self::ContentReady { hash }, - crate::sync_engine::LiveEvent::NeighborUp(node) => Self::NeighborUp(node), - crate::sync_engine::LiveEvent::NeighborDown(node) => Self::NeighborDown(node), - crate::sync_engine::LiveEvent::SyncFinished(details) => Self::SyncFinished(details), + crate::docs_engine::LiveEvent::ContentReady { hash } => Self::ContentReady { hash }, + crate::docs_engine::LiveEvent::NeighborUp(node) => Self::NeighborUp(node), + crate::docs_engine::LiveEvent::NeighborDown(node) => Self::NeighborDown(node), + crate::docs_engine::LiveEvent::SyncFinished(details) => Self::SyncFinished(details), } } } diff --git a/iroh/src/client/tags.rs b/iroh/src/client/tags.rs index b16a9c187c..c2d4309977 100644 --- a/iroh/src/client/tags.rs +++ b/iroh/src/client/tags.rs @@ -2,7 +2,7 @@ use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use iroh_bytes::{BlobFormat, Hash, Tag}; +use iroh_blobs::{BlobFormat, Hash, Tag}; use quic_rpc::{RpcClient, ServiceConnection}; use serde::{Deserialize, Serialize}; diff --git a/iroh/src/sync_engine.rs b/iroh/src/docs_engine.rs similarity index 91% rename from iroh/src/sync_engine.rs rename to iroh/src/docs_engine.rs index ec21f8be0a..85f9ed4470 100644 --- a/iroh/src/sync_engine.rs +++ b/iroh/src/docs_engine.rs @@ -1,17 +1,17 @@ -//! Handlers and actors to for live syncing [`iroh_sync`] replicas. +//! Handlers and actors to for live syncing [`iroh_docs`] replicas. //! -//! [`iroh_sync::Replica`] is also called documents here. +//! [`iroh_docs::Replica`] is also called documents here. 
use std::{io, sync::Arc}; use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use iroh_bytes::downloader::Downloader; -use iroh_bytes::{store::EntryStatus, Hash}; +use iroh_blobs::downloader::Downloader; +use iroh_blobs::{store::EntryStatus, Hash}; +use iroh_docs::{actor::SyncHandle, ContentStatus, ContentStatusCallback, Entry, NamespaceId}; use iroh_gossip::net::Gossip; use iroh_net::util::SharedAbortingJoinHandle; use iroh_net::{key::PublicKey, MagicEndpoint, NodeAddr}; -use iroh_sync::{actor::SyncHandle, ContentStatus, ContentStatusCallback, Entry, NamespaceId}; use serde::{Deserialize, Serialize}; use tokio::sync::{mpsc, oneshot}; use tracing::{error, error_span, Instrument}; @@ -29,16 +29,16 @@ pub use self::state::{Origin, SyncReason}; /// Capacity of the channel for the [`ToLiveActor`] messages. const ACTOR_CHANNEL_CAP: usize = 64; -/// Capacity for the channels for [`SyncEngine::subscribe`]. +/// Capacity for the channels for [`Engine::subscribe`]. const SUBSCRIBE_CHANNEL_CAP: usize = 256; /// The sync engine coordinates actors that manage open documents, set-reconciliation syncs with /// peers and a gossip swarm for each syncing document. /// -/// The RPC methods dealing with documents and sync operate on the `SyncEngine`, with method +/// The RPC methods dealing with documents and sync operate on the `Engine`, with method /// implementations in [rpc]. #[derive(derive_more::Debug, Clone)] -pub struct SyncEngine { +pub struct Engine { pub(crate) endpoint: MagicEndpoint, pub(crate) sync: SyncHandle, to_live_actor: mpsc::Sender, @@ -48,15 +48,15 @@ pub struct SyncEngine { content_status_cb: ContentStatusCallback, } -impl SyncEngine { +impl Engine { /// Start the sync engine. /// /// This will spawn two tokio tasks for the live sync coordination and gossip actors, and a - /// thread for the [`iroh_sync::actor::SyncHandle`]. - pub(crate) fn spawn( + /// thread for the [`iroh_docs::actor::SyncHandle`]. 
+ pub(crate) fn spawn( endpoint: MagicEndpoint, gossip: Gossip, - replica_store: iroh_sync::store::Store, + replica_store: iroh_docs::store::Store, bao_store: B, downloader: Downloader, ) -> Self { @@ -175,7 +175,7 @@ impl SyncEngine { Ok(a.or(b)) } - /// Handle an incoming iroh-sync connection. + /// Handle an incoming iroh-docs connection. pub(super) async fn handle_connection(&self, conn: quinn::Connecting) -> anyhow::Result<()> { self.to_live_actor .send(ToLiveActor::HandleConnection { conn }) @@ -244,14 +244,14 @@ impl From for LiveEvent { impl LiveEvent { fn from_replica_event( - ev: iroh_sync::Event, + ev: iroh_docs::Event, content_status_cb: &ContentStatusCallback, ) -> Result { Ok(match ev { - iroh_sync::Event::LocalInsert { entry, .. } => Self::InsertLocal { + iroh_docs::Event::LocalInsert { entry, .. } => Self::InsertLocal { entry: entry.into(), }, - iroh_sync::Event::RemoteInsert { entry, from, .. } => Self::InsertRemote { + iroh_docs::Event::RemoteInsert { entry, from, .. } => Self::InsertRemote { content_status: content_status_cb(entry.content_hash()), entry: entry.into(), from: PublicKey::from_bytes(&from)?, diff --git a/iroh/src/sync_engine/gossip.rs b/iroh/src/docs_engine/gossip.rs similarity index 99% rename from iroh/src/sync_engine/gossip.rs rename to iroh/src/docs_engine/gossip.rs index 4771353151..9c9050d856 100644 --- a/iroh/src/sync_engine/gossip.rs +++ b/iroh/src/docs_engine/gossip.rs @@ -2,12 +2,12 @@ use std::collections::HashSet; use anyhow::{anyhow, Context, Result}; use futures_lite::StreamExt; +use iroh_docs::{actor::SyncHandle, ContentStatus, NamespaceId}; use iroh_gossip::{ net::{Event, Gossip}, proto::TopicId, }; use iroh_net::key::PublicKey; -use iroh_sync::{actor::SyncHandle, ContentStatus, NamespaceId}; use tokio::{ sync::{broadcast::error::RecvError, mpsc}, task::JoinSet, diff --git a/iroh/src/sync_engine/live.rs b/iroh/src/docs_engine/live.rs similarity index 98% rename from iroh/src/sync_engine/live.rs rename to 
iroh/src/docs_engine/live.rs index d22c77f950..0299ec03a5 100644 --- a/iroh/src/sync_engine/live.rs +++ b/iroh/src/docs_engine/live.rs @@ -5,14 +5,11 @@ use std::{collections::HashMap, time::SystemTime}; use anyhow::{Context, Result}; use futures_lite::FutureExt; -use iroh_bytes::downloader::{DownloadError, DownloadRequest, Downloader}; -use iroh_bytes::get::Stats; -use iroh_bytes::HashAndFormat; -use iroh_bytes::{store::EntryStatus, Hash}; -use iroh_gossip::{net::Gossip, proto::TopicId}; -use iroh_net::NodeId; -use iroh_net::{key::PublicKey, MagicEndpoint, NodeAddr}; -use iroh_sync::{ +use iroh_blobs::downloader::{DownloadError, DownloadRequest, Downloader}; +use iroh_blobs::get::Stats; +use iroh_blobs::HashAndFormat; +use iroh_blobs::{store::EntryStatus, Hash}; +use iroh_docs::{ actor::{OpenOpts, SyncHandle}, net::{ connect_and_sync, handle_connection, AbortReason, AcceptError, AcceptOutcome, ConnectError, @@ -20,6 +17,9 @@ use iroh_sync::{ }, AuthorHeads, ContentStatus, NamespaceId, SignedEntry, }; +use iroh_gossip::{net::Gossip, proto::TopicId}; +use iroh_net::NodeId; +use iroh_net::{key::PublicKey, MagicEndpoint, NodeAddr}; use serde::{Deserialize, Serialize}; use tokio::{ sync::{self, mpsc, oneshot}, @@ -30,7 +30,7 @@ use tracing::{debug, error, error_span, info, instrument, trace, warn, Instrumen use super::gossip::{GossipActor, ToGossipActor}; use super::state::{NamespaceStates, Origin, SyncReason}; -/// An iroh-sync operation +/// An iroh-docs operation /// /// This is the message that is broadcast over iroh-gossip. #[derive(Debug, Clone, Serialize, Deserialize, strum::Display)] @@ -129,7 +129,7 @@ type SyncAcceptRes = Result; type DownloadRes = (NamespaceId, Hash, Result); // Currently peers might double-sync in both directions. -pub struct LiveActor { +pub struct LiveActor { /// Receiver for actor messages. 
inbox: mpsc::Receiver, sync: SyncHandle, @@ -137,8 +137,8 @@ pub struct LiveActor { gossip: Gossip, bao_store: B, downloader: Downloader, - replica_events_tx: flume::Sender, - replica_events_rx: flume::Receiver, + replica_events_tx: flume::Sender, + replica_events_rx: flume::Receiver, /// Send messages to self. /// Note: Must not be used in methods called from `Self::run` directly to prevent deadlocks. @@ -163,7 +163,7 @@ pub struct LiveActor { /// Sync state per replica and peer state: NamespaceStates, } -impl LiveActor { +impl LiveActor { /// Create the live actor. #[allow(clippy::too_many_arguments)] pub fn new( @@ -654,9 +654,9 @@ impl LiveActor { } } - async fn on_replica_event(&mut self, event: iroh_sync::Event) -> Result<()> { + async fn on_replica_event(&mut self, event: iroh_docs::Event) -> Result<()> { match event { - iroh_sync::Event::LocalInsert { namespace, entry } => { + iroh_docs::Event::LocalInsert { namespace, entry } => { let topic = TopicId::from_bytes(*namespace.as_bytes()); // A new entry was inserted locally. Broadcast a gossip message. if self.state.is_syncing(&namespace) { @@ -665,7 +665,7 @@ impl LiveActor { self.gossip.broadcast(topic, message).await?; } } - iroh_sync::Event::RemoteInsert { + iroh_docs::Event::RemoteInsert { namespace, entry, from, diff --git a/iroh/src/sync_engine/rpc.rs b/iroh/src/docs_engine/rpc.rs similarity index 96% rename from iroh/src/sync_engine/rpc.rs rename to iroh/src/docs_engine/rpc.rs index 6749d715b0..fd65957bc6 100644 --- a/iroh/src/sync_engine/rpc.rs +++ b/iroh/src/docs_engine/rpc.rs @@ -1,9 +1,9 @@ -//! This module contains an impl block on [`SyncEngine`] with handlers for RPC requests +//! 
This module contains an impl block on [`Engine`] with handlers for RPC requests use anyhow::anyhow; use futures_lite::Stream; -use iroh_bytes::{store::Store as BaoStore, BlobFormat}; -use iroh_sync::{Author, DocTicket, NamespaceSecret}; +use iroh_blobs::{store::Store as BaoStore, BlobFormat}; +use iroh_docs::{Author, DocTicket, NamespaceSecret}; use tokio_stream::StreamExt; use crate::client::docs::ShareMode; @@ -12,6 +12,7 @@ use crate::rpc_protocol::{ AuthorImportRequest, AuthorImportResponse, DocGetSyncPeersRequest, DocGetSyncPeersResponse, }; use crate::{ + docs_engine::Engine, rpc_protocol::{ AuthorCreateRequest, AuthorCreateResponse, AuthorListRequest, AuthorListResponse, DocCloseRequest, DocCloseResponse, DocCreateRequest, DocCreateResponse, DocDelRequest, @@ -24,14 +25,13 @@ use crate::{ DocStartSyncRequest, DocStartSyncResponse, DocStatusRequest, DocStatusResponse, DocSubscribeRequest, DocSubscribeResponse, RpcResult, }, - sync_engine::SyncEngine, }; /// Capacity for the flume channels to forward sync store iterators to async RPC streams. 
const ITER_CHANNEL_CAP: usize = 64; #[allow(missing_docs)] -impl SyncEngine { +impl Engine { pub async fn author_create( &self, _req: AuthorCreateRequest, @@ -137,10 +137,10 @@ impl SyncEngine { me.apply_options(addr_options); let capability = match mode { - ShareMode::Read => iroh_sync::Capability::Read(doc_id), + ShareMode::Read => iroh_docs::Capability::Read(doc_id), ShareMode::Write => { let secret = self.sync.export_secret_key(doc_id).await?; - iroh_sync::Capability::Write(secret) + iroh_docs::Capability::Write(secret) } }; self.start_sync(doc_id, vec![]).await?; diff --git a/iroh/src/sync_engine/state.rs b/iroh/src/docs_engine/state.rs similarity index 99% rename from iroh/src/sync_engine/state.rs rename to iroh/src/docs_engine/state.rs index eac00026df..7bab017e05 100644 --- a/iroh/src/sync_engine/state.rs +++ b/iroh/src/docs_engine/state.rs @@ -1,9 +1,9 @@ use anyhow::Result; -use iroh_net::NodeId; -use iroh_sync::{ +use iroh_docs::{ net::{AbortReason, AcceptOutcome, SyncFinished}, NamespaceId, }; +use iroh_net::NodeId; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::time::{Instant, SystemTime}; diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs index 6bf6ba4bfa..275c23459e 100644 --- a/iroh/src/lib.rs +++ b/iroh/src/lib.rs @@ -3,7 +3,7 @@ //! ## Feature Flags //! //! - `metrics`: Enable metrics collection. Enabled by default. -//! - `fs-store`: Enables the disk based storage backend for `iroh-bytes`. Enabled by default. +//! - `fs-store`: Enables the disk based storage backend for `iroh-blobs`. Enabled by default. //! 
#![cfg_attr(docsrs, feature(doc_cfg))] #![deny(missing_docs, rustdoc::broken_intra_doc_links)] @@ -12,18 +12,18 @@ #[doc(inline)] pub use iroh_base as base; #[doc(inline)] -pub use iroh_bytes as bytes; +pub use iroh_blobs as blobs; #[doc(inline)] -pub use iroh_net as net; +pub use iroh_docs as docs; #[doc(inline)] -pub use iroh_sync as sync; +pub use iroh_net as net; pub mod client; pub mod node; pub mod util; +mod docs_engine; mod rpc_protocol; -mod sync_engine; /// Expose metrics module #[cfg(feature = "metrics")] diff --git a/iroh/src/metrics.rs b/iroh/src/metrics.rs index 0fba864ef3..25e94b8cb1 100644 --- a/iroh/src/metrics.rs +++ b/iroh/src/metrics.rs @@ -38,7 +38,7 @@ impl Metric for Metrics { pub fn try_init_metrics_collection() -> std::io::Result<()> { iroh_metrics::core::Core::try_init(|reg, metrics| { metrics.insert(crate::metrics::Metrics::new(reg)); - metrics.insert(iroh_sync::metrics::Metrics::new(reg)); + metrics.insert(iroh_docs::metrics::Metrics::new(reg)); metrics.insert(iroh_net::metrics::MagicsockMetrics::new(reg)); metrics.insert(iroh_net::metrics::NetcheckMetrics::new(reg)); metrics.insert(iroh_net::metrics::PortmapMetrics::new(reg)); @@ -54,7 +54,7 @@ pub fn get_metrics() -> anyhow::Result> { let core = iroh_metrics::core::Core::get().ok_or_else(|| anyhow::anyhow!("metrics are disabled"))?; collect( - core.get_collector::(), + core.get_collector::(), &mut map, ); collect( diff --git a/iroh/src/node.rs b/iroh/src/node.rs index cdd107eb51..36d9dba3f5 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -13,10 +13,10 @@ use std::sync::Arc; use anyhow::{anyhow, Result}; use futures_lite::{future::Boxed as BoxFuture, FutureExt, StreamExt}; use iroh_base::ticket::BlobTicket; -use iroh_bytes::downloader::Downloader; -use iroh_bytes::store::Store as BaoStore; -use iroh_bytes::BlobFormat; -use iroh_bytes::Hash; +use iroh_blobs::downloader::Downloader; +use iroh_blobs::store::Store as BaoStore; +use iroh_blobs::BlobFormat; +use iroh_blobs::Hash; use 
iroh_net::relay::RelayUrl; use iroh_net::util::AbortingJoinHandle; use iroh_net::{ @@ -32,8 +32,8 @@ use tokio_util::sync::CancellationToken; use tokio_util::task::LocalPoolHandle; use tracing::debug; +use crate::docs_engine::Engine; use crate::rpc_protocol::{Request, Response}; -use crate::sync_engine::SyncEngine; mod builder; mod rpc; @@ -61,8 +61,8 @@ impl Callbacks { } } -impl iroh_bytes::provider::EventSender for Callbacks { - fn send(&self, event: iroh_bytes::provider::Event) -> BoxFuture<()> { +impl iroh_blobs::provider::EventSender for Callbacks { + fn send(&self, event: iroh_blobs::provider::Event) -> BoxFuture<()> { let this = self.clone(); async move { let cbs = this.0.read().await; @@ -105,30 +105,30 @@ struct NodeInner { gc_task: Option>, #[debug("rt")] rt: LocalPoolHandle, - pub(crate) sync: SyncEngine, + pub(crate) sync: Engine, downloader: Downloader, } /// Events emitted by the [`Node`] informing about the current status. #[derive(Debug, Clone)] pub enum Event { - /// Events from the iroh-bytes transfer protocol. - ByteProvide(iroh_bytes::provider::Event), + /// Events from the iroh-blobs transfer protocol. + ByteProvide(iroh_blobs::provider::Event), /// Events from database - Db(iroh_bytes::store::Event), + Db(iroh_blobs::store::Event), } /// In memory node. -pub type MemNode = Node; +pub type MemNode = Node; /// Persistent node. -pub type FsNode = Node; +pub type FsNode = Node; impl MemNode { /// Returns a new builder for the [`Node`], by default configured to run in memory. /// /// Once done with the builder call [`Builder::spawn`] to create the node. - pub fn memory() -> Builder { + pub fn memory() -> Builder { Builder::default() } } @@ -140,7 +140,7 @@ impl FsNode { /// Once done with the builder call [`Builder::spawn`] to create the node. 
pub async fn persistent( root: impl AsRef, - ) -> Result> { + ) -> Result> { Builder::default().persist(root).await } } @@ -279,7 +279,7 @@ mod tests { use anyhow::{bail, Context}; use bytes::Bytes; - use iroh_bytes::provider::AddProgress; + use iroh_blobs::provider::AddProgress; use iroh_net::{relay::RelayMode, test_utils::DnsPkarrServer}; use crate::{ @@ -339,7 +339,7 @@ mod tests { node.subscribe(move |event| { let r = r.clone(); async move { - if let Event::ByteProvide(iroh_bytes::provider::Event::TaggedBlobAdded { + if let Event::ByteProvide(iroh_blobs::provider::Event::TaggedBlobAdded { hash, .. }) = event diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index f60f03bd79..e423328e4e 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -9,11 +9,12 @@ use std::{ use anyhow::{bail, Context, Result}; use futures_lite::StreamExt; use iroh_base::key::SecretKey; -use iroh_bytes::{ +use iroh_blobs::{ downloader::Downloader, protocol::Closed, store::{GcMarkEvent, GcSweepEvent, Map, Store as BaoStore}, }; +use iroh_docs::net::DOCS_ALPN; use iroh_gossip::net::{Gossip, GOSSIP_ALPN}; use iroh_net::{ discovery::{dns::DnsDiscovery, pkarr_publish::PkarrPublisher, ConcurrentDiscovery, Discovery}, @@ -22,7 +23,6 @@ use iroh_net::{ relay::RelayMode, MagicEndpoint, }; -use iroh_sync::net::SYNC_ALPN; use quic_rpc::{ transport::{misc::DummyServerEndpoint, quinn::QuinnServerEndpoint}, RpcServer, ServiceEndpoint, @@ -34,15 +34,15 @@ use tracing::{debug, error, error_span, info, trace, warn, Instrument}; use crate::{ client::RPC_ALPN, + docs_engine::Engine, node::{Event, NodeInner}, rpc_protocol::{Request, Response, RpcService}, - sync_engine::SyncEngine, util::{fs::load_secret_key, path::IrohPaths}, }; use super::{rpc, rpc_status::RpcStatus, Callbacks, EventCallback, Node}; -pub const PROTOCOLS: [&[u8]; 3] = [iroh_bytes::protocol::ALPN, GOSSIP_ALPN, SYNC_ALPN]; +pub const PROTOCOLS: [&[u8]; 3] = [iroh_blobs::protocol::ALPN, GOSSIP_ALPN, DOCS_ALPN]; 
/// Default bind address for the node. /// 11204 is "iroh" in leetspeak @@ -61,8 +61,8 @@ const MAX_STREAMS: u64 = 10; /// /// You must supply a blob store and a document store. /// -/// Blob store implementations are available in [`iroh_bytes::store`]. -/// Document store implementations are available in [`iroh_sync::store`]. +/// Blob store implementations are available in [`iroh_blobs::store`]. +/// Document store implementations are available in [`iroh_docs::store`]. /// /// Everything else is optional. /// @@ -86,7 +86,7 @@ where gc_policy: GcPolicy, dns_resolver: Option, node_discovery: DiscoveryConfig, - docs_store: iroh_sync::store::fs::Store, + docs_store: iroh_docs::store::fs::Store, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: bool, } @@ -120,7 +120,7 @@ impl From> for DiscoveryConfig { } } -impl Default for Builder { +impl Default for Builder { fn default() -> Self { Self { storage: StorageConfig::Mem, @@ -132,7 +132,7 @@ impl Default for Builder { dns_resolver: None, rpc_endpoint: Default::default(), gc_policy: GcPolicy::Disabled, - docs_store: iroh_sync::store::Store::memory(), + docs_store: iroh_docs::store::Store::memory(), node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, @@ -144,7 +144,7 @@ impl Builder { /// Creates a new builder for [`Node`] using the given databases. 
pub fn with_db_and_store( blobs_store: D, - docs_store: iroh_sync::store::Store, + docs_store: iroh_docs::store::Store, storage: StorageConfig, ) -> Self { Self { @@ -174,26 +174,26 @@ where pub async fn persist( self, root: impl AsRef, - ) -> Result> { + ) -> Result> { let root = root.as_ref(); let blob_dir = IrohPaths::BaoStoreDir.with_root(root); tokio::fs::create_dir_all(&blob_dir).await?; - let blobs_store = iroh_bytes::store::fs::Store::load(&blob_dir) + let blobs_store = iroh_blobs::store::fs::Store::load(&blob_dir) .await .with_context(|| format!("Failed to load iroh database from {}", blob_dir.display()))?; let docs_store = - iroh_sync::store::fs::Store::persistent(IrohPaths::DocsDatabase.with_root(root))?; + iroh_docs::store::fs::Store::persistent(IrohPaths::DocsDatabase.with_root(root))?; let v0 = blobs_store - .import_flat_store(iroh_bytes::store::fs::FlatStorePaths { + .import_flat_store(iroh_blobs::store::fs::FlatStorePaths { complete: root.join("blobs.v0"), partial: root.join("blobs-partial.v0"), meta: root.join("blobs-meta.v0"), }) .await?; let v1 = blobs_store - .import_flat_store(iroh_bytes::store::fs::FlatStorePaths { + .import_flat_store(iroh_blobs::store::fs::FlatStorePaths { complete: root.join("blobs.v1").join("complete"), partial: root.join("blobs.v1").join("partial"), meta: root.join("blobs.v1").join("meta"), @@ -202,7 +202,7 @@ where if v0 || v1 { tracing::info!("flat data was imported - reapply inline options"); blobs_store - .update_inline_options(iroh_bytes::store::fs::InlineOptions::default(), true) + .update_inline_options(iroh_blobs::store::fs::InlineOptions::default(), true) .await?; } @@ -419,7 +419,7 @@ where // spawn the sync engine let downloader = Downloader::new(self.blobs_store.clone(), endpoint.clone(), lp.clone()); - let sync = SyncEngine::spawn( + let sync = Engine::spawn( endpoint.clone(), gossip.clone(), self.docs_store, @@ -608,7 +608,7 @@ where async fn gc_loop( db: D, - ds: iroh_sync::actor::SyncHandle, + ds: 
iroh_docs::actor::SyncHandle, gc_period: Duration, callbacks: Callbacks, ) { @@ -625,7 +625,7 @@ where tokio::time::sleep(gc_period).await; tracing::debug!("Starting GC"); callbacks - .send(Event::Db(iroh_bytes::store::Event::GcStarted)) + .send(Event::Db(iroh_blobs::store::Event::GcStarted)) .await; live.clear(); let doc_hashes = match ds.content_hashes().await { @@ -682,7 +682,7 @@ where } } callbacks - .send(Event::Db(iroh_bytes::store::Event::GcCompleted)) + .send(Event::Db(iroh_blobs::store::Event::GcCompleted)) .await; } } @@ -710,13 +710,13 @@ async fn handle_connection( alpn: String, node: Arc>, gossip: Gossip, - sync: SyncEngine, + sync: Engine, ) -> Result<()> { match alpn.as_bytes() { GOSSIP_ALPN => gossip.handle_connection(connecting.await?).await?, - SYNC_ALPN => sync.handle_connection(connecting).await?, - alpn if alpn == iroh_bytes::protocol::ALPN => { - iroh_bytes::provider::handle_connection( + DOCS_ALPN => sync.handle_connection(connecting).await?, + alpn if alpn == iroh_blobs::protocol::ALPN => { + iroh_blobs::provider::handle_connection( connecting, node.db.clone(), node.callbacks.clone(), diff --git a/iroh/src/node/rpc.rs b/iroh/src/node/rpc.rs index 9cdb9aba8b..5a8f9d5bab 100644 --- a/iroh/src/node/rpc.rs +++ b/iroh/src/node/rpc.rs @@ -8,15 +8,15 @@ use futures_buffered::BufferedStreamExt; use futures_lite::{Stream, StreamExt}; use genawaiter::sync::{Co, Gen}; use iroh_base::rpc::RpcResult; -use iroh_bytes::downloader::{DownloadRequest, Downloader}; -use iroh_bytes::export::ExportProgress; -use iroh_bytes::format::collection::Collection; -use iroh_bytes::get::db::DownloadProgress; -use iroh_bytes::get::Stats; -use iroh_bytes::store::{ConsistencyCheckProgress, ExportFormat, ImportProgress, MapEntry}; -use iroh_bytes::util::progress::ProgressSender; -use iroh_bytes::BlobFormat; -use iroh_bytes::{ +use iroh_blobs::downloader::{DownloadRequest, Downloader}; +use iroh_blobs::export::ExportProgress; +use iroh_blobs::format::collection::Collection; 
+use iroh_blobs::get::db::DownloadProgress; +use iroh_blobs::get::Stats; +use iroh_blobs::store::{ConsistencyCheckProgress, ExportFormat, ImportProgress, MapEntry}; +use iroh_blobs::util::progress::ProgressSender; +use iroh_blobs::BlobFormat; +use iroh_blobs::{ hashseq::parse_hash_seq, provider::AddProgress, store::{Store as BaoStore, ValidateProgress}, @@ -498,7 +498,7 @@ impl Handler { msg: DocImportFileRequest, progress: flume::Sender, ) -> anyhow::Result<()> { - use iroh_bytes::store::ImportMode; + use iroh_blobs::store::ImportMode; use std::collections::BTreeMap; let progress = FlumeProgressSender::new(progress); @@ -593,7 +593,7 @@ impl Handler { } x }); - iroh_bytes::export::export( + iroh_blobs::export::export( &self.inner.db, entry.content_hash(), path, @@ -628,7 +628,7 @@ impl Handler { let (tx, rx) = flume::bounded(1024); let progress = FlumeProgressSender::new(tx); self.rt().spawn_pinned(move || async move { - let res = iroh_bytes::export::export( + let res = iroh_blobs::export::export( &self.inner.db, msg.hash, msg.path, @@ -650,7 +650,7 @@ impl Handler { msg: BlobAddPathRequest, progress: flume::Sender, ) -> anyhow::Result<()> { - use iroh_bytes::store::ImportMode; + use iroh_blobs::store::ImportMode; use std::collections::BTreeMap; let progress = FlumeProgressSender::new(progress); @@ -760,7 +760,7 @@ impl Handler { self.inner .callbacks .send(Event::ByteProvide( - iroh_bytes::provider::Event::TaggedBlobAdded { hash, format, tag }, + iroh_blobs::provider::Event::TaggedBlobAdded { hash, format, tag }, )) .await; @@ -1048,7 +1048,7 @@ async fn download( progress: FlumeProgressSender, ) -> Result<()> where - D: iroh_bytes::store::Store, + D: iroh_blobs::store::Store, { let BlobDownloadRequest { hash, @@ -1152,13 +1152,13 @@ where let get_conn = { let progress = progress.clone(); move || async move { - let conn = endpoint.connect(node, iroh_bytes::protocol::ALPN).await?; + let conn = endpoint.connect(node, iroh_blobs::protocol::ALPN).await?; 
progress.send(DownloadProgress::Connected).await?; Ok(conn) } }; - let res = iroh_bytes::get::db::get_to_db(db, get_conn, &hash_and_format, progress).await; + let res = iroh_blobs::get::db::get_to_db(db, get_conn, &hash_and_format, progress).await; if res.is_ok() { match tag { diff --git a/iroh/src/rpc_protocol.rs b/iroh/src/rpc_protocol.rs index 4af54cb922..3fc34f5edd 100644 --- a/iroh/src/rpc_protocol.rs +++ b/iroh/src/rpc_protocol.rs @@ -12,8 +12,8 @@ use std::{collections::BTreeMap, path::PathBuf}; use bytes::Bytes; use derive_more::{From, TryInto}; use iroh_base::node_addr::AddrInfoOptions; -pub use iroh_bytes::{export::ExportProgress, get::db::DownloadProgress, BlobFormat, Hash}; -use iroh_bytes::{ +pub use iroh_blobs::{export::ExportProgress, get::db::DownloadProgress, BlobFormat, Hash}; +use iroh_blobs::{ format::collection::Collection, store::{BaoBlobSize, ConsistencyCheckProgress}, util::Tag, @@ -24,7 +24,7 @@ use iroh_net::{ NodeId, }; -use iroh_sync::{ +use iroh_docs::{ actor::OpenState, store::{DownloadPolicy, Query}, Author, AuthorId, CapabilityKind, DocTicket, Entry, NamespaceId, PeerIdBytes, SignedEntry, @@ -37,8 +37,8 @@ use quic_rpc::{ use serde::{Deserialize, Serialize}; pub use iroh_base::rpc::{RpcError, RpcResult}; -use iroh_bytes::store::{ExportFormat, ExportMode}; -pub use iroh_bytes::{provider::AddProgress, store::ValidateProgress}; +use iroh_blobs::store::{ExportFormat, ExportMode}; +pub use iroh_blobs::{provider::AddProgress, store::ValidateProgress}; use crate::{ client::{ @@ -47,9 +47,9 @@ use crate::{ node::NodeStatus, tags::TagInfo, }, - sync_engine::LiveEvent, + docs_engine::LiveEvent, }; -pub use iroh_bytes::util::SetTagOption; +pub use iroh_blobs::util::SetTagOption; /// A request to the node to provide the data at the given path /// diff --git a/iroh/src/util/path.rs b/iroh/src/util/path.rs index b9d7093f4a..0240e11de5 100644 --- a/iroh/src/util/path.rs +++ b/iroh/src/util/path.rs @@ -9,10 +9,10 @@ pub enum IrohPaths { /// Path to 
the node's secret key for the [`iroh_net::key::PublicKey`]. #[strum(serialize = "keypair")] SecretKey, - /// Path to the node's [file based blob store](iroh_bytes::store::fs::Store). + /// Path to the node's [file based blob store](iroh_blobs::store::fs::Store). #[strum(serialize = "blobs")] BaoStoreDir, - /// Path to the [iroh-sync document database](iroh_sync::store::fs::Store) + /// Path to the [iroh-docs document database](iroh_docs::store::fs::Store) #[strum(serialize = "docs.redb")] DocsDatabase, /// Path to the console state diff --git a/iroh/src/util/progress.rs b/iroh/src/util/progress.rs index 6f9ada749b..5bbb4e572a 100644 --- a/iroh/src/util/progress.rs +++ b/iroh/src/util/progress.rs @@ -15,7 +15,7 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use std::task::Poll; -use iroh_bytes::util::io::TrackingWriter; +use iroh_blobs::util::io::TrackingWriter; use portable_atomic::{AtomicU16, AtomicU64}; use tokio::io::{self, AsyncRead, AsyncWrite}; use tokio::sync::{broadcast, mpsc}; diff --git a/iroh/tests/gc.rs b/iroh/tests/gc.rs index fe27b4f93d..e0899fb2fb 100644 --- a/iroh/tests/gc.rs +++ b/iroh/tests/gc.rs @@ -10,7 +10,7 @@ use futures_lite::FutureExt; use iroh::node::{self, Node}; use rand::RngCore; -use iroh_bytes::{ +use iroh_blobs::{ hashseq::HashSeq, store::{EntryStatus, MapMut, Store}, util::Tag, @@ -40,9 +40,9 @@ pub fn simulate_remote(data: &[u8]) -> (blake3::Hash, Cursor) { /// Wrap a bao store in a node that has gc enabled. 
async fn wrap_in_node(bao_store: S, gc_period: Duration) -> Node where - S: iroh_bytes::store::Store, + S: iroh_blobs::store::Store, { - let doc_store = iroh_sync::store::Store::memory(); + let doc_store = iroh_docs::store::Store::memory(); node::Builder::with_db_and_store(bao_store, doc_store, iroh::node::StorageConfig::Mem) .gc_policy(iroh::node::GcPolicy::Interval(gc_period)) .spawn() @@ -50,9 +50,9 @@ where .unwrap() } -async fn attach_db_events( +async fn attach_db_events( node: &Node, -) -> flume::Receiver { +) -> flume::Receiver { let (db_send, db_recv) = flume::unbounded(); node.subscribe(move |ev| { let db_send = db_send.clone(); @@ -69,21 +69,21 @@ async fn attach_db_events( } async fn gc_test_node() -> ( - Node, - iroh_bytes::store::mem::Store, - flume::Receiver, + Node, + iroh_blobs::store::mem::Store, + flume::Receiver, ) { - let bao_store = iroh_bytes::store::mem::Store::new(); + let bao_store = iroh_blobs::store::mem::Store::new(); let node = wrap_in_node(bao_store.clone(), Duration::from_millis(500)).await; let db_recv = attach_db_events(&node).await; (node, bao_store, db_recv) } -async fn step(evs: &flume::Receiver) { +async fn step(evs: &flume::Receiver) { while evs.try_recv().is_ok() {} for _ in 0..3 { while let Ok(ev) = evs.recv_async().await { - if let iroh_bytes::store::Event::GcCompleted = ev { + if let iroh_blobs::store::Event::GcCompleted = ev { break; } } @@ -203,23 +203,23 @@ mod file { use iroh_io::AsyncSliceReaderExt; use testdir::testdir; - use iroh_bytes::{ + use iroh_blobs::{ store::{BaoBatchWriter, ConsistencyCheckProgress, Map, MapEntryMut, ReportLevel}, util::progress::{FlumeProgressSender, ProgressSender as _}, TempTag, }; use tokio::io::AsyncReadExt; - fn path(root: PathBuf, suffix: &'static str) -> impl Fn(&iroh_bytes::Hash) -> PathBuf { + fn path(root: PathBuf, suffix: &'static str) -> impl Fn(&iroh_blobs::Hash) -> PathBuf { move |hash| root.join(format!("{}.{}", hash.to_hex(), suffix)) } - fn data_path(root: PathBuf) -> impl 
Fn(&iroh_bytes::Hash) -> PathBuf { + fn data_path(root: PathBuf) -> impl Fn(&iroh_blobs::Hash) -> PathBuf { // this assumes knowledge of the internal directory structure of the flat store path(root.join("data"), "data") } - fn outboard_path(root: PathBuf) -> impl Fn(&iroh_bytes::Hash) -> PathBuf { + fn outboard_path(root: PathBuf) -> impl Fn(&iroh_blobs::Hash) -> PathBuf { // this assumes knowledge of the internal directory structure of the flat store path(root.join("data"), "obao4") } @@ -245,7 +245,7 @@ mod file { async fn redb_doc_import_stress() -> Result<()> { let _ = tracing_subscriber::fmt::try_init(); let dir = testdir!(); - let bao_store = iroh_bytes::store::fs::Store::load(dir.join("store")).await?; + let bao_store = iroh_blobs::store::fs::Store::load(dir.join("store")).await?; let node = wrap_in_node(bao_store.clone(), Duration::from_secs(10)).await; let client = node.client(); let doc = client.docs.create().await?; @@ -288,7 +288,7 @@ mod file { let path = data_path(dir.clone()); let outboard_path = outboard_path(dir.clone()); - let bao_store = iroh_bytes::store::fs::Store::load(dir.clone()).await?; + let bao_store = iroh_blobs::store::fs::Store::load(dir.clone()).await?; let node = wrap_in_node(bao_store.clone(), Duration::from_millis(100)).await; let evs = attach_db_events(&node).await; let data1 = create_test_data(10000000); @@ -390,7 +390,7 @@ mod file { /// the outboard file, then commit it to a complete entry. /// /// During this time, the partial entry is protected by a temp tag. 
- async fn simulate_download_partial( + async fn simulate_download_partial( bao_store: &S, data: Bytes, ) -> io::Result<(S::EntryMut, TempTag)> { @@ -432,7 +432,7 @@ mod file { Ok((entry, tt)) } - async fn simulate_download_complete( + async fn simulate_download_complete( bao_store: &S, data: Bytes, ) -> io::Result { @@ -451,7 +451,7 @@ mod file { let path = data_path(dir.clone()); let outboard_path = outboard_path(dir.clone()); - let bao_store = iroh_bytes::store::fs::Store::load(dir.clone()).await?; + let bao_store = iroh_blobs::store::fs::Store::load(dir.clone()).await?; let node = wrap_in_node(bao_store.clone(), Duration::from_millis(10)).await; let evs = attach_db_events(&node).await; @@ -483,7 +483,7 @@ mod file { let _ = tracing_subscriber::fmt::try_init(); let dir = testdir!(); - let bao_store = iroh_bytes::store::fs::Store::load(dir.clone()).await?; + let bao_store = iroh_blobs::store::fs::Store::load(dir.clone()).await?; let node = wrap_in_node(bao_store.clone(), Duration::from_secs(1)).await; let evs = attach_db_events(&node).await; diff --git a/iroh/tests/provide.rs b/iroh/tests/provide.rs index ec9eade08a..9f8370b23f 100644 --- a/iroh/tests/provide.rs +++ b/iroh/tests/provide.rs @@ -15,7 +15,7 @@ use rand::RngCore; use tokio::sync::mpsc; use bao_tree::{blake3, ChunkNum, ChunkRanges}; -use iroh_bytes::{ +use iroh_blobs::{ format::collection::Collection, get::{ fsm::ConnectedNext, @@ -35,13 +35,13 @@ async fn dial(secret_key: SecretKey, peer: NodeAddr) -> anyhow::Result(db: D) -> Builder { - let store = iroh_sync::store::Store::memory(); + let store = iroh_docs::store::Store::memory(); iroh::node::Builder::with_db_and_store(db, store, iroh::node::StorageConfig::Mem).bind_port(0) } @@ -141,7 +141,7 @@ fn get_options(node_id: NodeId, addrs: Vec) -> (SecretKey, NodeAddr) async fn multiple_clients() -> Result<()> { let content = b"hello world!"; - let mut db = iroh_bytes::store::readonly_mem::Store::default(); + let mut db = 
iroh_blobs::store::readonly_mem::Store::default(); let expect_hash = db.insert(content.as_slice()); let expect_name = "hello_world"; let collection = Collection::from_iter([(expect_name, expect_hash)]); @@ -202,7 +202,7 @@ where let mut expects = Vec::new(); let num_blobs = file_opts.len(); - let (mut mdb, _lookup) = iroh_bytes::store::readonly_mem::Store::new(file_opts.clone()); + let (mut mdb, _lookup) = iroh_blobs::store::readonly_mem::Store::new(file_opts.clone()); let mut blobs = Vec::new(); for opt in file_opts.into_iter() { @@ -314,7 +314,7 @@ fn assert_events(events: Vec, num_blobs: usize) { async fn test_server_close() { // Prepare a Provider transferring a file. let _guard = iroh_test::logging::setup(); - let mut db = iroh_bytes::store::readonly_mem::Store::default(); + let mut db = iroh_blobs::store::readonly_mem::Store::default(); let child_hash = db.insert(b"hello there"); let collection = Collection::from_iter([("hello", child_hash)]); let hash = db.insert_many(collection.to_blobs()).unwrap(); @@ -370,8 +370,8 @@ async fn test_server_close() { /// returns the database and the root hash of the collection fn create_test_db( entries: impl IntoIterator, impl AsRef<[u8]>)>, -) -> (iroh_bytes::store::readonly_mem::Store, Hash) { - let (mut db, hashes) = iroh_bytes::store::readonly_mem::Store::new(entries); +) -> (iroh_blobs::store::readonly_mem::Store, Hash) { + let (mut db, hashes) = iroh_blobs::store::readonly_mem::Store::new(entries); let collection = Collection::from_iter(hashes); let hash = db.insert_many(collection.to_blobs()).unwrap(); (db, hash) @@ -409,7 +409,7 @@ async fn test_ipv6() { async fn test_not_found() { let _ = iroh_test::logging::setup(); - let db = iroh_bytes::store::readonly_mem::Store::default(); + let db = iroh_blobs::store::readonly_mem::Store::default(); let hash = blake3::hash(b"hello").into(); let node = match test_node(db).spawn().await { Ok(provider) => provider, @@ -450,7 +450,7 @@ async fn test_not_found() { async fn 
test_chunk_not_found_1() { let _ = iroh_test::logging::setup(); - let db = iroh_bytes::store::mem::Store::new(); + let db = iroh_blobs::store::mem::Store::new(); let data = (0..1024 * 64).map(|i| i as u8).collect::>(); let hash = blake3::hash(&data).into(); let _entry = db.get_or_create(hash, data.len() as u64).await.unwrap(); @@ -580,7 +580,7 @@ fn make_test_data(n: usize) -> Vec { async fn test_size_request_blob() { let expected = make_test_data(1024 * 64 + 1234); let last_chunk = last_chunk(&expected); - let (db, hashes) = iroh_bytes::store::readonly_mem::Store::new([("test", &expected)]); + let (db, hashes) = iroh_blobs::store::readonly_mem::Store::new([("test", &expected)]); let hash = Hash::from(*hashes.values().next().unwrap()); let node = test_node(db).spawn().await.unwrap(); let addrs = node.local_endpoint_addresses().await.unwrap(); diff --git a/iroh/tests/sync.rs b/iroh/tests/sync.rs index d466dc1463..74f3a8880a 100644 --- a/iroh/tests/sync.rs +++ b/iroh/tests/sync.rs @@ -23,16 +23,16 @@ use rand::{CryptoRng, Rng, SeedableRng}; use tracing::{debug, error_span, info, Instrument}; use tracing_subscriber::{prelude::*, EnvFilter}; -use iroh_bytes::Hash; -use iroh_net::relay::RelayMode; -use iroh_sync::{ +use iroh_blobs::Hash; +use iroh_docs::{ store::{DownloadPolicy, FilterKind, Query}, AuthorId, ContentStatus, }; +use iroh_net::relay::RelayMode; const TIMEOUT: Duration = Duration::from_secs(60); -fn test_node(secret_key: SecretKey) -> Builder { +fn test_node(secret_key: SecretKey) -> Builder { Node::memory() .secret_key(secret_key) .relay_mode(RelayMode::Disabled) @@ -44,7 +44,7 @@ fn test_node(secret_key: SecretKey) -> Builder impl Future>> + 'static { +) -> impl Future>> + 'static { let secret_key = SecretKey::generate_with_rng(rng); async move { let node = test_node(secret_key); @@ -57,7 +57,7 @@ fn spawn_node( async fn spawn_nodes( n: usize, mut rng: &mut (impl CryptoRng + Rng), -) -> anyhow::Result>> { +) -> anyhow::Result>> { let mut futs = vec![]; 
for i in 0..n { futs.push(spawn_node(i, &mut rng)); @@ -743,7 +743,7 @@ async fn test_download_policies() -> Result<()> { let mut events_a = doc_a.subscribe().await?; let mut events_b = doc_b.subscribe().await?; - let mut key_hashes: HashMap = HashMap::default(); + let mut key_hashes: HashMap = HashMap::default(); // set content in a for k in star_wars_movies.iter() {