diff --git a/Cargo.lock b/Cargo.lock
index 0a06fd14bf..8cd20fe5bc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2662,10 +2662,14 @@ dependencies = [
  "derive_more",
  "ed25519-dalek",
  "flume",
+ "futures-buffered",
+ "futures-lite 2.3.0",
  "futures-util",
  "hex",
  "iroh-base",
  "iroh-blake3",
+ "iroh-blobs",
+ "iroh-gossip",
  "iroh-metrics",
  "iroh-net",
  "iroh-test",
diff --git a/iroh-docs/Cargo.toml b/iroh-docs/Cargo.toml
index d1e39d56df..005d2e1ea3 100644
--- a/iroh-docs/Cargo.toml
+++ b/iroh-docs/Cargo.toml
@@ -17,35 +17,35 @@ workspace = true
 [dependencies]
 anyhow = "1"
 blake3 = { package = "iroh-blake3", version = "1.4.5"}
+bytes = { version = "1.4", features = ["serde"] }
 derive_more = { version = "1.0.0-beta.6", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref"] }
 ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] }
 flume = "0.11"
+futures-buffered = "0.2.4"
+futures-lite = "2.3.0"
+futures-util = { version = "0.3.25", optional = true }
+hex = "0.4"
 iroh-base = { version = "0.17.0", path = "../iroh-base" }
+iroh-blobs = { version = "0.17.0", path = "../iroh-blobs", optional = true, features = ["downloader"] }
+iroh-gossip = { version = "0.17.0", path = "../iroh-gossip", optional = true }
 iroh-metrics = { version = "0.17.0", path = "../iroh-metrics", optional = true }
+iroh-net = { version = "0.17.0", optional = true, path = "../iroh-net" }
+lru = "0.12"
 num_enum = "0.7"
 postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] }
 rand = "0.8.5"
 rand_core = "0.6.4"
+redb = { version = "2.0.0" }
+redb_v1 = { package = "redb", version = "1.5.1" }
+self_cell = "1.0.3"
 serde = { version = "1.0.164", features = ["derive"] }
 strum = { version = "0.25", features = ["derive"] }
-bytes = { version = "1.4", features = ["serde"] }
-hex = "0.4"
+tempfile = { version = "3.4" }
 thiserror = "1"
-tracing = "0.1"
 tokio = { version = "1", features = ["sync"] }
-
-# fs-store
-redb = { version = "2.0.0" }
-redb_v1 = { package = "redb", version = "1.5.1" }
-tempfile = { version = "3.4" }
-
-# net
-iroh-net = { version = "0.17.0", optional = true, path = "../iroh-net" }
-tokio-util = { version = "0.7", optional = true, features = ["codec", "io-util", "io"] }
 tokio-stream = { version = "0.1", optional = true, features = ["sync"]}
-futures-util = { version = "0.3.25", optional = true }
-lru = "0.12"
-self_cell = "1.0.3"
+tokio-util = { version = "0.7", optional = true, features = ["codec", "io-util", "io"] }
+tracing = "0.1"
 
 [dev-dependencies]
 iroh-test = { path = "../iroh-test" }
@@ -56,9 +56,10 @@ tempfile = "3.4"
 test-strategy = "0.3.1"
 
 [features]
-default = ["net", "metrics"]
+default = ["net", "metrics", "engine"]
 net = ["dep:iroh-net", "tokio/io-util", "dep:tokio-stream", "dep:tokio-util", "dep:futures-util"]
 metrics = ["dep:iroh-metrics"]
+engine = ["net", "dep:iroh-gossip", "dep:iroh-blobs"]
 
 [package.metadata.docs.rs]
 all-features = true
diff --git a/iroh/src/docs_engine.rs b/iroh-docs/src/engine.rs
similarity index 84%
rename from iroh/src/docs_engine.rs
rename to iroh-docs/src/engine.rs
index b64870fda3..b5345b0bea 100644
--- a/iroh/src/docs_engine.rs
+++ b/iroh-docs/src/engine.rs
@@ -1,6 +1,6 @@
-//! Handlers and actors to for live syncing [`iroh_docs`] replicas.
+//! Handlers and actors for live syncing replicas.
 //!
-//! [`iroh_docs::Replica`] is also called documents here.
+//! [`crate::Replica`] is also called documents here.
 
 use std::path::PathBuf;
 use std::{
@@ -13,8 +13,6 @@ use anyhow::{bail, Context, Result};
 use futures_lite::{Stream, StreamExt};
 use iroh_blobs::downloader::Downloader;
 use iroh_blobs::{store::EntryStatus, Hash};
-use iroh_docs::{actor::SyncHandle, ContentStatus, ContentStatusCallback, Entry, NamespaceId};
-use iroh_docs::{Author, AuthorId};
 use iroh_gossip::net::Gossip;
 use iroh_net::util::SharedAbortingJoinHandle;
 use iroh_net::{key::PublicKey, Endpoint, NodeAddr};
@@ -22,17 +20,19 @@ use serde::{Deserialize, Serialize};
 use tokio::sync::{mpsc, oneshot};
 use tracing::{error, error_span, Instrument};
 
-mod gossip;
-mod live;
-pub mod rpc;
-mod state;
+use crate::{actor::SyncHandle, ContentStatus, ContentStatusCallback, Entry, NamespaceId};
+use crate::{Author, AuthorId};
 
-use gossip::GossipActor;
-use live::{LiveActor, ToLiveActor};
+use self::gossip::GossipActor;
+use self::live::{LiveActor, ToLiveActor};
 pub use self::live::SyncEvent;
 pub use self::state::{Origin, SyncReason};
 
+mod gossip;
+mod live;
+mod state;
+
 /// Capacity of the channel for the [`ToLiveActor`] messages.
 const ACTOR_CHANNEL_CAP: usize = 64;
 /// Capacity for the channels for [`Engine::subscribe`].
@@ -40,30 +40,30 @@ const SUBSCRIBE_CHANNEL_CAP: usize = 256;
 
 /// The sync engine coordinates actors that manage open documents, set-reconciliation syncs with
 /// peers and a gossip swarm for each syncing document.
-///
-/// The RPC methods dealing with documents and sync operate on the `Engine`, with method
-/// implementations in [rpc].
 #[derive(derive_more::Debug, Clone)]
 pub struct Engine {
-    pub(crate) endpoint: Endpoint,
-    pub(crate) sync: SyncHandle,
+    /// [`Endpoint`] used by the engine.
+    pub endpoint: Endpoint,
+    /// Handle to the actor thread.
+    pub sync: SyncHandle,
+    /// The persistent default author for this engine.
+    pub default_author: Arc<DefaultAuthor>,
     to_live_actor: mpsc::Sender<ToLiveActor>,
     #[allow(dead_code)]
     actor_handle: SharedAbortingJoinHandle<()>,
     #[debug("ContentStatusCallback")]
     content_status_cb: ContentStatusCallback,
-    default_author: Arc<DefaultAuthor>,
 }
 
 impl Engine {
     /// Start the sync engine.
     ///
     /// This will spawn two tokio tasks for the live sync coordination and gossip actors, and a
-    /// thread for the [`iroh_docs::actor::SyncHandle`].
-    pub(crate) async fn spawn<B: iroh_blobs::store::Store>(
+    /// thread for the [`crate::actor::SyncHandle`].
+    pub async fn spawn<B: iroh_blobs::store::Store>(
        endpoint: Endpoint,
        gossip: Gossip,
-        replica_store: iroh_docs::store::Store,
+        replica_store: crate::store::Store,
        bao_store: B,
        downloader: Downloader,
        default_author_storage: DefaultAuthorStorage,
@@ -127,7 +127,7 @@ impl Engine {
     ///
     /// If `peers` is non-empty, it will both do an initial set-reconciliation sync with each peer,
     /// and join an iroh-gossip swarm with these peers to receive and broadcast document updates.
-    async fn start_sync(&self, namespace: NamespaceId, peers: Vec<NodeAddr>) -> Result<()> {
+    pub async fn start_sync(&self, namespace: NamespaceId, peers: Vec<NodeAddr>) -> Result<()> {
        let (reply, reply_rx) = oneshot::channel();
        self.to_live_actor
            .send(ToLiveActor::StartSync {
@@ -144,7 +144,7 @@ impl Engine {
     ///
     /// If `kill_subscribers` is true, all existing event subscribers will be dropped. This means
     /// they will receive `None` and no further events in case of rejoining the document.
-    async fn leave(&self, namespace: NamespaceId, kill_subscribers: bool) -> Result<()> {
+    pub async fn leave(&self, namespace: NamespaceId, kill_subscribers: bool) -> Result<()> {
        let (reply, reply_rx) = oneshot::channel();
        self.to_live_actor
            .send(ToLiveActor::Leave {
@@ -158,7 +158,7 @@
     }
 
     /// Subscribe to replica and sync progress events.
-    async fn subscribe(
+    pub async fn subscribe(
        &self,
        namespace: NamespaceId,
     ) -> Result<impl Stream<Item = Result<LiveEvent>> + Unpin + 'static> {
@@ -195,7 +195,7 @@
     }
 
     /// Handle an incoming iroh-docs connection.
-    pub(super) async fn handle_connection(
+    pub async fn handle_connection(
        &self,
        conn: iroh_net::endpoint::Connecting,
     ) -> anyhow::Result<()> {
@@ -205,13 +205,15 @@
        Ok(())
     }
 
-    pub(crate) async fn start_shutdown(&self) -> Result<()> {
+    /// Shut down the engine.
+    pub async fn shutdown(&self) -> Result<()> {
        self.to_live_actor.send(ToLiveActor::Shutdown).await?;
        Ok(())
     }
 }
 
-pub(crate) fn entry_to_content_status(entry: io::Result<EntryStatus>) -> ContentStatus {
+/// Converts an [`EntryStatus`] into a [`ContentStatus`].
+pub fn entry_to_content_status(entry: io::Result<EntryStatus>) -> ContentStatus {
     match entry {
        Ok(EntryStatus::Complete) => ContentStatus::Complete,
        Ok(EntryStatus::Partial) => ContentStatus::Incomplete,
@@ -277,14 +279,14 @@ impl From for LiveEvent {
 impl LiveEvent {
     fn from_replica_event(
-        ev: iroh_docs::Event,
+        ev: crate::Event,
        content_status_cb: &ContentStatusCallback,
     ) -> Result<Self> {
        Ok(match ev {
-            iroh_docs::Event::LocalInsert { entry, .. } => Self::InsertLocal {
+            crate::Event::LocalInsert { entry, .. } => Self::InsertLocal {
                entry: entry.into(),
            },
-            iroh_docs::Event::RemoteInsert { entry, from, .. } => Self::InsertRemote {
+            crate::Event::RemoteInsert { entry, from, .. } => Self::InsertRemote {
                content_status: content_status_cb(entry.content_hash()),
                entry: entry.into(),
                from: PublicKey::from_bytes(&from)?,
@@ -302,11 +304,19 @@ impl LiveEvent {
 /// path (as base32 encoded string of the author's public key).
 #[derive(Debug)]
 pub enum DefaultAuthorStorage {
+    /// Memory storage.
     Mem,
+    /// File based persistent storage.
     Persistent(PathBuf),
 }
 
 impl DefaultAuthorStorage {
+    /// Load the default author from the storage.
+    ///
+    /// Will create and save a new author if the storage is empty.
+    ///
+    /// Returns an error if the author can't be parsed or if the author does not exist in the docs
+    /// store.
     pub async fn load(&self, docs_store: &SyncHandle) -> anyhow::Result<AuthorId> {
        match self {
            Self::Mem => {
@@ -343,6 +353,8 @@ impl DefaultAuthorStorage {
            }
        }
     }
+
+    /// Save a new default author.
     pub async fn persist(&self, author_id: AuthorId) -> anyhow::Result<()> {
        match self {
            Self::Mem => {
@@ -363,24 +375,32 @@
     }
 }
 
+/// Persistent default author for a docs engine.
 #[derive(Debug)]
-struct DefaultAuthor {
+pub struct DefaultAuthor {
     value: RwLock<AuthorId>,
     storage: DefaultAuthorStorage,
 }
 
 impl DefaultAuthor {
-    async fn load(storage: DefaultAuthorStorage, docs_store: &SyncHandle) -> Result<Self> {
+    /// Load the default author from storage.
+    ///
+    /// If the storage is empty, creates a new author and persists it.
+    pub async fn load(storage: DefaultAuthorStorage, docs_store: &SyncHandle) -> Result<Self> {
        let value = storage.load(docs_store).await?;
        Ok(Self {
            value: RwLock::new(value),
            storage,
        })
     }
-    fn get(&self) -> AuthorId {
+
+    /// Get the current default author.
+    pub fn get(&self) -> AuthorId {
        *self.value.read().unwrap()
     }
-    async fn set(&self, author_id: AuthorId, docs_store: &SyncHandle) -> Result<()> {
+
+    /// Set the default author.
+    pub async fn set(&self, author_id: AuthorId, docs_store: &SyncHandle) -> Result<()> {
        if docs_store.export_author(author_id).await?.is_none() {
            bail!("The author does not exist");
        }
diff --git a/iroh/src/docs_engine/gossip.rs b/iroh-docs/src/engine/gossip.rs
similarity index 99%
rename from iroh/src/docs_engine/gossip.rs
rename to iroh-docs/src/engine/gossip.rs
index 373bd20ec6..17077ac802 100644
--- a/iroh/src/docs_engine/gossip.rs
+++ b/iroh-docs/src/engine/gossip.rs
@@ -3,7 +3,6 @@ use std::collections::HashSet;
 use anyhow::{Context, Result};
 use futures_lite::StreamExt;
 use futures_util::FutureExt;
-use iroh_docs::{actor::SyncHandle, ContentStatus, NamespaceId};
 use iroh_gossip::net::{Event, Gossip};
 use iroh_net::key::PublicKey;
 use tokio::{
@@ -16,6 +15,8 @@ use tokio_stream::{
 };
 use tracing::{debug, error, trace, warn};
 
+use crate::{actor::SyncHandle, ContentStatus, NamespaceId};
+
 use super::live::{Op, ToLiveActor};
 
 #[derive(strum::Display, Debug)]
diff --git a/iroh/src/docs_engine/live.rs b/iroh-docs/src/engine/live.rs
similarity index 99%
rename from iroh/src/docs_engine/live.rs
rename to iroh-docs/src/engine/live.rs
index 8dd3d5843a..5c7608722b 100644
--- a/iroh/src/docs_engine/live.rs
+++ b/iroh-docs/src/engine/live.rs
@@ -9,14 +9,6 @@ use iroh_blobs::downloader::{DownloadError, DownloadRequest, Downloader};
 use iroh_blobs::get::Stats;
 use iroh_blobs::HashAndFormat;
 use iroh_blobs::{store::EntryStatus, Hash};
-use iroh_docs::{
-    actor::{OpenOpts, SyncHandle},
-    net::{
-        connect_and_sync, handle_connection, AbortReason, AcceptError, AcceptOutcome, ConnectError,
-        SyncFinished,
-    },
-    AuthorHeads, ContentStatus, NamespaceId, SignedEntry,
-};
 use iroh_gossip::{net::Gossip, proto::TopicId};
 use iroh_net::NodeId;
 use iroh_net::{key::PublicKey, Endpoint, NodeAddr};
@@ -27,6 +19,15 @@ use tokio::{
 };
 use tracing::{debug, error, error_span, info, instrument, trace, warn, Instrument, Span};
 
+use crate::{
+    actor::{OpenOpts, SyncHandle},
+    net::{
+        connect_and_sync, handle_connection, AbortReason, AcceptError, AcceptOutcome, ConnectError,
+        SyncFinished,
+    },
+    AuthorHeads, ContentStatus, NamespaceId, SignedEntry,
+};
+
 use super::gossip::{GossipActor, ToGossipActor};
 use super::state::{NamespaceStates, Origin, SyncReason};
 
@@ -145,8 +146,8 @@ pub struct LiveActor {
     gossip: Gossip,
     bao_store: B,
     downloader: Downloader,
-    replica_events_tx: flume::Sender<iroh_docs::Event>,
-    replica_events_rx: flume::Receiver<iroh_docs::Event>,
+    replica_events_tx: flume::Sender<crate::Event>,
+    replica_events_rx: flume::Receiver<crate::Event>,
 
     /// Send messages to self.
     /// Note: Must not be used in methods called from `Self::run` directly to prevent deadlocks.
@@ -684,9 +685,9 @@ impl LiveActor {
        }
     }
 
-    async fn on_replica_event(&mut self, event: iroh_docs::Event) -> Result<()> {
+    async fn on_replica_event(&mut self, event: crate::Event) -> Result<()> {
        match event {
-            iroh_docs::Event::LocalInsert { namespace, entry } => {
+            crate::Event::LocalInsert { namespace, entry } => {
                debug!(namespace=%namespace.fmt_short(), "replica event: LocalInsert");
                let topic = TopicId::from_bytes(*namespace.as_bytes());
                // A new entry was inserted locally. Broadcast a gossip message.
@@ -696,7 +697,7 @@ impl LiveActor {
                    self.gossip.broadcast(topic, message).await?;
                }
            }
-            iroh_docs::Event::RemoteInsert {
+            crate::Event::RemoteInsert {
                namespace,
                entry,
                from,
diff --git a/iroh/src/docs_engine/state.rs b/iroh-docs/src/engine/state.rs
similarity index 99%
rename from iroh/src/docs_engine/state.rs
rename to iroh-docs/src/engine/state.rs
index 91e28a721e..c9d4a1d0e0 100644
--- a/iroh/src/docs_engine/state.rs
+++ b/iroh-docs/src/engine/state.rs
@@ -1,8 +1,8 @@
-use anyhow::Result;
-use iroh_docs::{
+use crate::{
     net::{AbortReason, AcceptOutcome, SyncFinished},
     NamespaceId,
 };
+use anyhow::Result;
 use iroh_net::NodeId;
 use serde::{Deserialize, Serialize};
 use std::collections::BTreeMap;
diff --git a/iroh-docs/src/lib.rs b/iroh-docs/src/lib.rs
index 7bee08d712..b1347fe7be 100644
--- a/iroh-docs/src/lib.rs
+++ b/iroh-docs/src/lib.rs
@@ -40,6 +40,9 @@ pub mod net;
 #[cfg(feature = "net")]
 mod ticket;
 
+#[cfg(feature = "engine")]
+pub mod engine;
+
 pub mod actor;
 pub mod store;
 pub mod sync;
diff --git a/iroh/src/client/docs.rs b/iroh/src/client/docs.rs
index c335d082e2..2a35233eba 100644
--- a/iroh/src/client/docs.rs
+++ b/iroh/src/client/docs.rs
@@ -33,7 +33,7 @@ use crate::rpc_protocol::{
 };
 
 #[doc(inline)]
-pub use crate::docs_engine::{Origin, SyncEvent, SyncReason};
+pub use iroh_docs::engine::{Origin, SyncEvent, SyncReason};
 
 use super::{blobs, flatten};
 
@@ -588,13 +588,13 @@ pub enum LiveEvent {
     PendingContentReady,
 }
 
-impl From<crate::docs_engine::LiveEvent> for LiveEvent {
-    fn from(event: crate::docs_engine::LiveEvent) -> LiveEvent {
+impl From<crate::docs::engine::LiveEvent> for LiveEvent {
+    fn from(event: crate::docs::engine::LiveEvent) -> LiveEvent {
        match event {
-            crate::docs_engine::LiveEvent::InsertLocal { entry } => Self::InsertLocal {
+            crate::docs::engine::LiveEvent::InsertLocal { entry } => Self::InsertLocal {
                entry: entry.into(),
            },
-            crate::docs_engine::LiveEvent::InsertRemote {
+            crate::docs::engine::LiveEvent::InsertRemote {
                from,
                entry,
                content_status,
@@ -603,11 +603,11 @@ impl From for LiveEvent {
                content_status,
                entry: entry.into(),
            },
-            crate::docs_engine::LiveEvent::ContentReady { hash } => Self::ContentReady { hash },
-            crate::docs_engine::LiveEvent::NeighborUp(node) => Self::NeighborUp(node),
-            crate::docs_engine::LiveEvent::NeighborDown(node) => Self::NeighborDown(node),
-            crate::docs_engine::LiveEvent::SyncFinished(details) => Self::SyncFinished(details),
-            crate::docs_engine::LiveEvent::PendingContentReady => Self::PendingContentReady,
+            crate::docs::engine::LiveEvent::ContentReady { hash } => Self::ContentReady { hash },
+            crate::docs::engine::LiveEvent::NeighborUp(node) => Self::NeighborUp(node),
+            crate::docs::engine::LiveEvent::NeighborDown(node) => Self::NeighborDown(node),
+            crate::docs::engine::LiveEvent::SyncFinished(details) => Self::SyncFinished(details),
+            crate::docs::engine::LiveEvent::PendingContentReady => Self::PendingContentReady,
        }
     }
 }
diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs
index 275c23459e..335b962582 100644
--- a/iroh/src/lib.rs
+++ b/iroh/src/lib.rs
@@ -22,7 +22,6 @@ pub mod client;
 pub mod node;
 pub mod util;
 
-mod docs_engine;
 mod rpc_protocol;
 
 /// Expose metrics module
diff --git a/iroh/src/node.rs b/iroh/src/node.rs
index 058363276f..7e0c6c2975 100644
--- a/iroh/src/node.rs
+++ b/iroh/src/node.rs
@@ -13,6 +13,7 @@ use futures_lite::StreamExt;
 use iroh_base::key::PublicKey;
 use iroh_blobs::downloader::Downloader;
 use iroh_blobs::store::Store as BaoStore;
+use iroh_docs::engine::Engine;
 use iroh_net::util::AbortingJoinHandle;
 use iroh_net::{endpoint::LocalEndpointsStream, key::SecretKey, Endpoint};
 use quic_rpc::transport::flume::FlumeConnection;
@@ -23,7 +24,6 @@ use tokio_util::task::LocalPoolHandle;
 use tracing::debug;
 
 use crate::client::RpcService;
-use crate::docs_engine::Engine;
 
 mod builder;
 mod rpc;
@@ -60,7 +60,7 @@ struct NodeInner {
     gc_task: Option<AbortingJoinHandle<()>>,
     #[debug("rt")]
     rt: LocalPoolHandle,
-    pub(crate) sync: Engine,
+    pub(crate) sync: DocsEngine,
     downloader: Downloader,
 }
 
@@ -193,6 +193,17 @@ impl NodeInner {
     }
 }
 
+/// Wrapper around [`Engine`] so that we can implement our RPC methods directly.
+#[derive(Debug, Clone)]
+pub(crate) struct DocsEngine(Engine);
+
+impl std::ops::Deref for DocsEngine {
+    type Target = Engine;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::time::Duration;
diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs
index 7c9875f3c1..db935479f2 100644
--- a/iroh/src/node/builder.rs
+++ b/iroh/src/node/builder.rs
@@ -14,6 +14,7 @@ use iroh_blobs::{
     protocol::Closed,
     store::{GcMarkEvent, GcSweepEvent, Map, Store as BaoStore},
 };
+use iroh_docs::engine::{DefaultAuthorStorage, Engine};
 use iroh_docs::net::DOCS_ALPN;
 use iroh_gossip::net::{Gossip, GOSSIP_ALPN};
 use iroh_net::{
@@ -32,13 +33,11 @@ use tracing::{debug, error, error_span, info, trace, warn, Instrument};
 
 use crate::{
     client::RPC_ALPN,
-    docs_engine::{DefaultAuthorStorage, Engine},
-    node::NodeInner,
     rpc_protocol::RpcService,
     util::{fs::load_secret_key, path::IrohPaths},
 };
 
-use super::{rpc, rpc_status::RpcStatus, Node};
+use super::{rpc, rpc_status::RpcStatus, DocsEngine, Node, NodeInner};
 
 pub const PROTOCOLS: [&[u8]; 3] = [iroh_blobs::protocol::ALPN, GOSSIP_ALPN, DOCS_ALPN];
 
@@ -466,6 +465,7 @@ where
        )
        .await?;
        let sync_db = sync.sync.clone();
+        let sync = DocsEngine(sync);
 
        let gc_task = if let GcPolicy::Interval(gc_period) = self.gc_policy {
            tracing::info!("Starting GC task with interval {:?}", gc_period);
@@ -575,7 +575,7 @@ where
                    // clean shutdown of the blobs db to close the write transaction
                    handler.inner.db.shutdown().await;
 
-                    if let Err(err) = handler.inner.sync.start_shutdown().await {
+                    if let Err(err) = handler.inner.sync.shutdown().await {
                        warn!("sync shutdown error: {:?}", err);
                    }
                    break
@@ -737,7 +737,7 @@ async fn handle_connection(
     connecting: iroh_net::endpoint::Connecting,
     alpn: String,
     node: Arc<NodeInner<D>>,
     gossip: Gossip,
-    sync: Engine,
+    sync: DocsEngine,
 ) -> Result<()> {
     match alpn.as_bytes() {
        GOSSIP_ALPN => gossip.handle_connection(connecting.await?).await?,
diff --git a/iroh/src/node/rpc.rs b/iroh/src/node/rpc.rs
index ba03e10486..fc500b7566 100644
--- a/iroh/src/node/rpc.rs
+++ b/iroh/src/node/rpc.rs
@@ -54,6 +54,8 @@ use crate::rpc_protocol::{
 
 use super::NodeInner;
 
+mod docs;
+
 const HEALTH_POLL_WAIT: Duration = Duration::from_secs(1);
 /// Chunk size for getting blobs over RPC
 const RPC_BLOB_GET_CHUNK_SIZE: usize = 1024 * 64;
diff --git a/iroh/src/docs_engine/rpc.rs b/iroh/src/node/rpc/docs.rs
similarity index 88%
rename from iroh/src/docs_engine/rpc.rs
rename to iroh/src/node/rpc/docs.rs
index 76f2afd761..a0433a803e 100644
--- a/iroh/src/docs_engine/rpc.rs
+++ b/iroh/src/node/rpc/docs.rs
@@ -1,4 +1,4 @@
-//! This module contains an impl block on [`Engine`] with handlers for RPC requests
+//! This module contains an impl block on [`DocsEngine`] with handlers for RPC requests
 
 use anyhow::anyhow;
 use futures_lite::Stream;
@@ -7,33 +7,28 @@ use iroh_docs::{Author, DocTicket, NamespaceSecret};
 use tokio_stream::StreamExt;
 
 use crate::client::docs::ShareMode;
+use crate::node::DocsEngine;
 use crate::rpc_protocol::{
-    AuthorDeleteRequest, AuthorDeleteResponse, AuthorExportRequest, AuthorExportResponse,
-    AuthorGetDefaultRequest, AuthorGetDefaultResponse, AuthorImportRequest, AuthorImportResponse,
-    AuthorSetDefaultRequest, AuthorSetDefaultResponse, DocGetSyncPeersRequest,
-    DocGetSyncPeersResponse,
-};
-use crate::{
-    docs_engine::Engine,
-    rpc_protocol::{
-        AuthorCreateRequest, AuthorCreateResponse, AuthorListRequest, AuthorListResponse,
-        DocCloseRequest, DocCloseResponse, DocCreateRequest, DocCreateResponse, DocDelRequest,
-        DocDelResponse, DocDropRequest, DocDropResponse, DocGetDownloadPolicyRequest,
-        DocGetDownloadPolicyResponse, DocGetExactRequest, DocGetExactResponse, DocGetManyRequest,
-        DocGetManyResponse, DocImportRequest, DocImportResponse, DocLeaveRequest, DocLeaveResponse,
-        DocListRequest, DocListResponse, DocOpenRequest, DocOpenResponse,
-        DocSetDownloadPolicyRequest, DocSetDownloadPolicyResponse, DocSetHashRequest,
-        DocSetHashResponse, DocSetRequest, DocSetResponse, DocShareRequest, DocShareResponse,
-        DocStartSyncRequest, DocStartSyncResponse, DocStatusRequest, DocStatusResponse,
-        DocSubscribeRequest, DocSubscribeResponse, RpcResult,
-    },
+    AuthorCreateRequest, AuthorCreateResponse, AuthorDeleteRequest, AuthorDeleteResponse,
+    AuthorExportRequest, AuthorExportResponse, AuthorGetDefaultRequest, AuthorGetDefaultResponse,
+    AuthorImportRequest, AuthorImportResponse, AuthorListRequest, AuthorListResponse,
+    AuthorSetDefaultRequest, AuthorSetDefaultResponse, DocCloseRequest, DocCloseResponse,
+    DocCreateRequest, DocCreateResponse, DocDelRequest, DocDelResponse, DocDropRequest,
+    DocDropResponse, DocGetDownloadPolicyRequest, DocGetDownloadPolicyResponse, DocGetExactRequest,
+    DocGetExactResponse, DocGetManyRequest, DocGetManyResponse, DocGetSyncPeersRequest,
+    DocGetSyncPeersResponse, DocImportRequest, DocImportResponse, DocLeaveRequest,
+    DocLeaveResponse, DocListRequest, DocListResponse, DocOpenRequest, DocOpenResponse,
+    DocSetDownloadPolicyRequest, DocSetDownloadPolicyResponse, DocSetHashRequest,
+    DocSetHashResponse, DocSetRequest, DocSetResponse, DocShareRequest, DocShareResponse,
+    DocStartSyncRequest, DocStartSyncResponse, DocStatusRequest, DocStatusResponse,
+    DocSubscribeRequest, DocSubscribeResponse, RpcResult,
 };
 
 /// Capacity for the flume channels to forward sync store iterators to async RPC streams.
 const ITER_CHANNEL_CAP: usize = 64;
 
 #[allow(missing_docs)]
-impl Engine {
+impl DocsEngine {
     pub async fn author_create(
        &self,
        _req: AuthorCreateRequest,
diff --git a/iroh/src/rpc_protocol.rs b/iroh/src/rpc_protocol.rs
index 7bfb5d60b3..ccfbc45671 100644
--- a/iroh/src/rpc_protocol.rs
+++ b/iroh/src/rpc_protocol.rs
@@ -41,15 +41,13 @@ use serde::{Deserialize, Serialize};
 pub use iroh_base::rpc::{RpcError, RpcResult};
 use iroh_blobs::store::{ExportFormat, ExportMode};
 pub use iroh_blobs::{provider::AddProgress, store::ValidateProgress};
+use iroh_docs::engine::LiveEvent;
 
-use crate::{
-    client::{
-        blobs::{BlobInfo, CollectionInfo, DownloadMode, IncompleteBlobInfo, WrapOption},
-        docs::{ImportProgress, ShareMode},
-        tags::TagInfo,
-        NodeStatus,
-    },
-    docs_engine::LiveEvent,
+use crate::client::{
+    blobs::{BlobInfo, CollectionInfo, DownloadMode, IncompleteBlobInfo, WrapOption},
+    docs::{ImportProgress, ShareMode},
+    tags::TagInfo,
+    NodeStatus,
 };
 
 pub use iroh_blobs::util::SetTagOption;
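For downstream code, the net effect of this refactor is that the sync engine is now a public, feature-gated API of `iroh-docs` (the new `engine` feature) instead of an internal module of `iroh`. The sketch below is not part of the diff; it only illustrates how the now-public API could be driven. The generic bound on `Engine::spawn` and the pre-built endpoint, gossip, store and downloader values are assumptions based on the types visible above, while the method names and argument order are taken from the diff.

```rust
// Sketch (not part of this diff): using iroh_docs::engine::Engine directly from a
// crate that depends on iroh-docs with the "engine" feature. The spawn bound
// `B: iroh_blobs::store::Store` is an assumption inferred from the `bao_store: B` field.
use anyhow::Result;
use futures_lite::StreamExt;
use iroh_docs::engine::{DefaultAuthorStorage, Engine};
use iroh_docs::NamespaceId;

async fn run_docs_engine<B: iroh_blobs::store::Store>(
    endpoint: iroh_net::Endpoint,
    gossip: iroh_gossip::net::Gossip,
    replica_store: iroh_docs::store::Store,
    bao_store: B,
    downloader: iroh_blobs::downloader::Downloader,
    namespace: NamespaceId,
    peers: Vec<iroh_net::NodeAddr>,
) -> Result<()> {
    // Keep the default author in memory; `DefaultAuthorStorage::Persistent(path)`
    // would make it survive restarts.
    let engine = Engine::spawn(
        endpoint,
        gossip,
        replica_store,
        bao_store,
        downloader,
        DefaultAuthorStorage::Mem,
    )
    .await?;

    // Join the document's gossip swarm and run set-reconciliation with the given peers.
    engine.start_sync(namespace, peers).await?;

    // Consume replica and sync progress events until the subscription ends.
    let mut events = engine.subscribe(namespace).await?;
    while let Some(event) = events.next().await {
        let _event = event?;
        // ...react to insert, sync-finished and neighbor events here...
    }

    engine.shutdown().await
}
```

Inside `iroh` itself, the node keeps a thin `DocsEngine` newtype (see `iroh/src/node.rs` above) that derefs to this same `Engine`, so the RPC handler methods can live on the wrapper while all sync behaviour stays in `iroh-docs`.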