From 8e0b090197f2d7360034eb3d344e2a0a1fa6a3f5 Mon Sep 17 00:00:00 2001 From: Amr Bashir Date: Wed, 4 Oct 2023 06:19:25 +0300 Subject: [PATCH 1/8] chore: `tauri-mobile` -> `cargo-mobile2` (#331) --- Dockerfile.aarch64-android | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.aarch64-android b/Dockerfile.aarch64-android index 21334cc71..d216b1e3a 100644 --- a/Dockerfile.aarch64-android +++ b/Dockerfile.aarch64-android @@ -24,7 +24,7 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | bash -s -- -y # RUN . $HOME/.cargo/env && rustup target add aarch64-unknown-linux-gnu # RUN . $HOME/.cargo/env && rustup toolchain install stable-aarch64-unknown-linux-gnu RUN . $HOME/.cargo/env && rustup target add aarch64-linux-android -RUN . $HOME/.cargo/env && cargo install --git https://github.com/tauri-apps/tauri-mobile +RUN . $HOME/.cargo/env && cargo install --git https://github.com/tauri-apps/cargo-mobile2 WORKDIR /root/cmake RUN wget https://github.com/Kitware/CMake/releases/download/v3.23.1/cmake-3.23.1.tar.gz From 8bcb537ff205e4443f0501bc92a01b075905d393 Mon Sep 17 00:00:00 2001 From: Darius Clark Date: Wed, 4 Oct 2023 14:58:53 -0400 Subject: [PATCH 2/8] refactor: Dont download attachment using constellation api (#332) --- extensions/warp-ipfs/src/store/message.rs | 173 ++++++---------------- 1 file changed, 48 insertions(+), 125 deletions(-) diff --git a/extensions/warp-ipfs/src/store/message.rs b/extensions/warp-ipfs/src/store/message.rs index 219074f70..37186388a 100644 --- a/extensions/warp-ipfs/src/store/message.rs +++ b/extensions/warp-ipfs/src/store/message.rs @@ -11,12 +11,10 @@ use futures::channel::mpsc::{unbounded, Sender, UnboundedSender}; use futures::channel::oneshot::{self, Sender as OneshotSender}; use futures::stream::{FuturesUnordered, SelectAll}; use futures::{SinkExt, Stream, StreamExt}; -use rust_ipfs::libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; -use rust_ipfs::{Ipfs, PeerId, SubscriptionStream}; +use rust_ipfs::{Ipfs, IpfsPath, PeerId, SubscriptionStream}; use libipld::Cid; use serde::{Deserialize, Serialize}; -use tokio::io::AsyncWriteExt; use tokio::sync::broadcast::{self, Receiver as BroadcastReceiver, Sender as BroadcastSender}; use tokio::sync::{OwnedSemaphorePermit, Semaphore}; use tokio_stream::wrappers::ReadDirStream; @@ -47,7 +45,7 @@ use super::document::utils::{GetLocalDag, ToCid}; use super::friends::FriendsStore; use super::identity::IdentityStore; use super::keystore::Keystore; -use super::{did_to_libp2p_pub, verify_serde_sig, ConversationEvents, DidExt, MessagingEvents}; +use super::{did_to_libp2p_pub, verify_serde_sig, ConversationEvents, MessagingEvents}; const PERMIT_AMOUNT: usize = 1; @@ -926,32 +924,6 @@ impl MessageStore { spam_check(&mut message, self.spam_filter.clone())?; let conversation_id = message.conversation_id(); - if message.message_type() == MessageType::Attachment - && direction == MessageDirection::In - { - if let Some(fs) = self.filesystem.clone() { - let dir = fs.root_directory(); - for file in message.attachments() { - let original = file.name(); - let mut inc = 0; - loop { - if dir.has_item(&original) { - if inc >= 20 { - break; - } - inc += 1; - file.set_name(&format!("{original}-{inc}")); - continue; - } - break; - } - if let Err(e) = dir.add_file(file) { - error!("Error adding file to constellation: {e}"); - } - } - } - } - let message_id = message.id(); let message_document = @@ -3506,111 +3478,62 @@ impl MessageStore { .cloned() .ok_or(Error::FileNotFound)?; - let root = 
constellation.root_directory(); - if !root.has_item(&attachment.name()) { - root.add_file(attachment.clone())?; - } + let _root = constellation.root_directory(); + + let reference = attachment + .reference() + .and_then(|reference| IpfsPath::from_str(&reference).ok()) + .ok_or(Error::FileNotFound)?; let ipfs = self.ipfs.clone(); - let constellation = constellation.clone(); - let own_did = self.did.clone(); + let _constellation = constellation.clone(); let progress_stream = async_stream::stream! { - yield Progression::CurrentProgress { - name: attachment.name(), - current: 0, - total: Some(attachment.size()), - }; - - let did = message.sender(); - if !did.eq(&own_did) { - if let Ok(peer_id) = did.to_peer_id() { - //This is done to insure we can successfully exchange blocks - let opt = DialOpts::peer_id(peer_id).condition(PeerCondition::NotDialing).build(); - if let Err(e) = ipfs.connect(opt).await { - warn!("Issue performing a connection to peer: {e}"); - } - } + let stream = match ipfs.get_unixfs(reference, &path).await { + Ok(stream) => stream, + Err(e) => { + yield Progression::ProgressFailed { + name: attachment.name(), + last_size: None, + error: Some(e.to_string()), + }; + return; } + }; - let mut file = match tokio::fs::File::create(&path).await { - Ok(file) => file, - Err(e) => { - error!("Error creating file: {e}"); - yield Progression::ProgressFailed { - name: attachment.name(), - last_size: None, - error: Some(e.to_string()), - }; - return; - } - }; + yield Progression::CurrentProgress { + name: attachment.name(), + current: 0, + total: Some(attachment.size()), + }; - let stream = match constellation.get_stream(&attachment.name()).await { - Ok(s) => s, - Err(e) => { - error!("Error creating stream: {e}"); - yield Progression::ProgressFailed { - name: attachment.name(), - last_size: None, - error: Some(e.to_string()), + for await event in stream { + match event { + rust_ipfs::unixfs::UnixfsStatus::ProgressStatus { written, total_size } => { + yield Progression::CurrentProgress { + name: attachment.name(), + current: written, + total: total_size }; - return; - } - }; - - let mut written = 0; - let mut failed = false; - for await res in stream { - match res { - Ok(bytes) => match file.write_all(&bytes).await { - Ok(_) => { - written += bytes.len(); - yield Progression::CurrentProgress { - name: attachment.name(), - current: written, - total: Some(attachment.size()), - }; - } - Err(e) => { - error!("Error writing to disk: {e}"); - yield Progression::ProgressFailed { - name: attachment.name(), - last_size: Some(written), - error: Some(e.to_string()), - }; - failed = true; - break; - } - }, - Err(e) => { - error!("Error reading from stream: {e}"); - yield Progression::ProgressFailed { - name: attachment.name(), - last_size: Some(written), - error: Some(e.to_string()), - }; - failed = true; - break; + }, + rust_ipfs::unixfs::UnixfsStatus::CompletedStatus { total_size, .. } => { + yield Progression::ProgressComplete { + name: attachment.name(), + total: total_size, + }; + }, + rust_ipfs::unixfs::UnixfsStatus::FailedStatus { written, error, .. 
} => { + if let Err(e) = tokio::fs::remove_file(&path).await { + error!("Error removing file: {e}"); } - } - } - - if failed { - if let Err(e) = tokio::fs::remove_file(&path).await { - error!("Error removing file: {e}"); - } - } - - if !failed { - if let Err(e) = file.flush().await { - error!("Error flushing stream: {e}"); - } - yield Progression::ProgressComplete { - name: attachment.name(), - total: Some(written), - }; + yield Progression::ProgressFailed { + name: attachment.name(), + last_size: Some(written), + error: error.map(|e| e.to_string()), + }; + }, } + } }; Ok(ConstellationProgressStream(progress_stream.boxed())) From 92e52b26fd0a168c8bfee03326fab065b75c81db Mon Sep 17 00:00:00 2001 From: Darius Clark Date: Wed, 4 Oct 2023 15:24:12 -0400 Subject: [PATCH 3/8] chore: Update dependency (#333) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index a5a8e7035..87ab302c3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,7 +81,7 @@ either = "1" void = "1" #ipfs dependency -rust-ipfs = "0.4.4" +rust-ipfs = "0.4.6" # Blink related crates # av-data is needed to use libaom. need to ensure that Warp and libaom use the same version of av-data From 2ae7f3884dbbfc04edbac8c0f3727d2982150bba Mon Sep 17 00:00:00 2001 From: sdwoodbury Date: Wed, 4 Oct 2023 17:25:35 -0400 Subject: [PATCH 4/8] feat(raygun): use MessageOptions.limit when fetching messages (#330) Co-authored-by: Darius Clark Co-authored-by: Darius --- .../warp-ipfs/src/store/conversation.rs | 19 ++++++++++++------- warp/src/raygun/mod.rs | 6 +++--- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/extensions/warp-ipfs/src/store/conversation.rs b/extensions/warp-ipfs/src/store/conversation.rs index caa0ff4cc..e59292bc1 100644 --- a/extensions/warp-ipfs/src/store/conversation.rs +++ b/extensions/warp-ipfs/src/store/conversation.rs @@ -376,8 +376,11 @@ impl ConversationDocument { let ipfs = ipfs.clone(); let stream = async_stream::stream! 
{ - + let mut remaining = option.limit(); for (index, document) in messages.iter().enumerate() { + if remaining.as_ref().map(|x| *x == 0).unwrap_or_default() { + break; + } if let Some(range) = option.range() { if range.start > index || range.end < index { continue @@ -388,20 +391,22 @@ impl ConversationDocument { continue } } - if let Ok(message) = document.resolve(&ipfs, &did, keystore.as_ref()).await { if option.pinned() && !message.pinned() { continue; } - if let Some(keyword) = option.keyword() { - if message + let should_yield = if let Some(keyword) = option.keyword() { + message .value() .iter() .any(|line| line.to_lowercase().contains(&keyword.to_lowercase())) - { - yield message; - } } else { + true + }; + if should_yield { + if let Some(remaining) = remaining.as_mut() { + *remaining = remaining.saturating_sub(1); + } yield message; } } diff --git a/warp/src/raygun/mod.rs b/warp/src/raygun/mod.rs index 5c638fdcc..f872ab01e 100644 --- a/warp/src/raygun/mod.rs +++ b/warp/src/raygun/mod.rs @@ -189,7 +189,7 @@ pub struct MessageOptions { keyword: Option, pinned: bool, range: Option>, - limit: Option, + limit: Option, skip: Option, } @@ -204,7 +204,7 @@ impl MessageOptions { self } - pub fn set_limit(mut self, limit: i64) -> MessageOptions { + pub fn set_limit(mut self, limit: u8) -> MessageOptions { self.limit = Some(limit); self } @@ -253,7 +253,7 @@ impl MessageOptions { } impl MessageOptions { - pub fn limit(&self) -> Option { + pub fn limit(&self) -> Option { self.limit } From 15c125a92712438cd8c919b87c4073c28442f16a Mon Sep 17 00:00:00 2001 From: Darius Clark Date: Fri, 6 Oct 2023 02:10:30 -0400 Subject: [PATCH 5/8] chore: Update dependency (#334) --- Cargo.toml | 2 +- .../warp-ipfs/src/store/document/identity.rs | 2 +- extensions/warp-ipfs/src/store/files.rs | 2 +- extensions/warp-ipfs/src/store/identity.rs | 6 ++++-- extensions/warp-ipfs/src/store/message.rs | 15 +++++++-------- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 87ab302c3..9d8995a6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,7 +81,7 @@ either = "1" void = "1" #ipfs dependency -rust-ipfs = "0.4.6" +rust-ipfs = "0.5.0" # Blink related crates # av-data is needed to use libaom. need to ensure that Warp and libaom use the same version of av-data diff --git a/extensions/warp-ipfs/src/store/document/identity.rs b/extensions/warp-ipfs/src/store/document/identity.rs index 2ccd7c7d4..e4a69c035 100644 --- a/extensions/warp-ipfs/src/store/document/identity.rs +++ b/extensions/warp-ipfs/src/store/document/identity.rs @@ -145,7 +145,7 @@ pub async fn unixfs_fetch( let fut = async { let stream = ipfs .unixfs() - .cat(IpfsPath::from(cid), None, &[], local) + .cat(IpfsPath::from(cid), None, &[], local, None) .await .map_err(anyhow::Error::from)?; diff --git a/extensions/warp-ipfs/src/store/files.rs b/extensions/warp-ipfs/src/store/files.rs index 62290e788..26800f6d2 100644 --- a/extensions/warp-ipfs/src/store/files.rs +++ b/extensions/warp-ipfs/src/store/files.rs @@ -127,7 +127,7 @@ impl FileStore { let mut index_stream = self .ipfs .unixfs() - .cat(IpfsPath::from(cid), None, &[], true) + .cat(IpfsPath::from(cid), None, &[], true, None) .await .map_err(anyhow::Error::from)? 
.boxed(); diff --git a/extensions/warp-ipfs/src/store/identity.rs b/extensions/warp-ipfs/src/store/identity.rs index 6680f4df5..ea64e282a 100644 --- a/extensions/warp-ipfs/src/store/identity.rs +++ b/extensions/warp-ipfs/src/store/identity.rs @@ -1107,6 +1107,7 @@ impl IdentityStore { None, &[], false, + None, ) .await? .boxed(); @@ -1163,6 +1164,7 @@ impl IdentityStore { None, &[], false, + None, ) .await? .boxed(); @@ -1263,7 +1265,7 @@ impl IdentityStore { async move { let mut stream = ipfs .unixfs() - .cat(picture, None, &[], false) + .cat(picture, None, &[], false, None) .await? .boxed(); @@ -1291,7 +1293,7 @@ impl IdentityStore { async move { let mut stream = ipfs .unixfs() - .cat(banner, None, &[], false) + .cat(banner, None, &[], false, None) .await? .boxed(); while let Some(_d) = stream.next().await { diff --git a/extensions/warp-ipfs/src/store/message.rs b/extensions/warp-ipfs/src/store/message.rs index 37186388a..a6745129f 100644 --- a/extensions/warp-ipfs/src/store/message.rs +++ b/extensions/warp-ipfs/src/store/message.rs @@ -3487,8 +3487,13 @@ impl MessageStore { let ipfs = self.ipfs.clone(); let _constellation = constellation.clone(); - let progress_stream = async_stream::stream! { + yield Progression::CurrentProgress { + name: attachment.name(), + current: 0, + total: Some(attachment.size()), + }; + let stream = match ipfs.get_unixfs(reference, &path).await { Ok(stream) => stream, Err(e) => { @@ -3498,13 +3503,7 @@ impl MessageStore { error: Some(e.to_string()), }; return; - } - }; - - yield Progression::CurrentProgress { - name: attachment.name(), - current: 0, - total: Some(attachment.size()), + }, }; for await event in stream { From b9e2749429aee37310591351cbb3b0069466e369 Mon Sep 17 00:00:00 2001 From: Darius Clark Date: Wed, 11 Oct 2023 16:18:34 -0400 Subject: [PATCH 6/8] refactor: Merge Identity and Friend modules, misc (#335) --- extensions/warp-ipfs/examples/messenger.rs | 67 +- extensions/warp-ipfs/src/behaviour/mod.rs | 2 +- extensions/warp-ipfs/src/config.rs | 77 +- extensions/warp-ipfs/src/lib.rs | 109 +- extensions/warp-ipfs/src/store/document.rs | 64 +- .../warp-ipfs/src/store/document/cache.rs | 331 +++ .../warp-ipfs/src/store/document/identity.rs | 104 +- .../warp-ipfs/src/store/document/root.rs | 625 +++++ extensions/warp-ipfs/src/store/files.rs | 90 +- extensions/warp-ipfs/src/store/friends.rs | 1058 -------- extensions/warp-ipfs/src/store/identity.rs | 2360 +++++++++-------- extensions/warp-ipfs/src/store/keystore.rs | 3 +- extensions/warp-ipfs/src/store/message.rs | 58 +- extensions/warp-ipfs/src/store/mod.rs | 1 - extensions/warp-ipfs/src/store/phonebook.rs | 7 - extensions/warp-ipfs/src/store/queue.rs | 2 +- extensions/warp-ipfs/tests/accounts.rs | 38 + 17 files changed, 2483 insertions(+), 2513 deletions(-) create mode 100644 extensions/warp-ipfs/src/store/document/cache.rs create mode 100644 extensions/warp-ipfs/src/store/document/root.rs delete mode 100644 extensions/warp-ipfs/src/store/friends.rs diff --git a/extensions/warp-ipfs/examples/messenger.rs b/extensions/warp-ipfs/examples/messenger.rs index 0f2703e3b..04608c2a1 100644 --- a/extensions/warp-ipfs/examples/messenger.rs +++ b/extensions/warp-ipfs/examples/messenger.rs @@ -38,8 +38,6 @@ struct Opt { experimental_node: bool, #[clap(long)] stdout_log: bool, - #[clap(long)] - disable_sender_emitter: bool, #[clap(long)] context: Option, @@ -129,7 +127,6 @@ async fn setup>( } config.store_setting.friend_request_response_duration = opt.wait.map(Duration::from_millis); - 
config.store_setting.disable_sender_event_emit = opt.disable_sender_emitter; config.ipfs_setting.mdns.enable = opt.mdns; let (mut account, raygun, filesystem) = WarpIpfsBuilder::default() @@ -324,36 +321,8 @@ async fn main() -> anyhow::Result<()> { continue } }; - // Note: This is one way to handle it outside of the event stream - if opt.disable_sender_emitter { - let id = match chat.create_conversation(&did).await { - Ok(id) => id, - Err(e) => { - writeln!(stdout, "Error creating conversation: {e}")?; - continue - } - }; - *topic.write() = id.id(); - writeln!(stdout, "Set conversation to {}", topic.read())?; - let mut stdout = stdout.clone(); - let account = new_account.clone(); - let stream = chat.get_conversation_stream(id.id()).await?; - let chat = chat.clone(); - let topic = topic.clone(); - - tokio::spawn(async move { - if let Err(e) = message_event_handle( - stdout.clone(), - account.clone(), - chat.clone(), - stream, - topic.clone(), - ).await { - writeln!(stdout, ">> Error processing event task: {e}").unwrap(); - } - }); - } else if let Err(e) = chat.create_conversation(&did).await { + if let Err(e) = chat.create_conversation(&did).await { writeln!(stdout, "Error creating conversation: {e}")?; continue } @@ -422,37 +391,9 @@ async fn main() -> anyhow::Result<()> { did_keys.push(did); } - if opt.disable_sender_emitter { - let id = match chat.create_group_conversation(Some(name.to_string()), did_keys).await { - Ok(id) => id, - Err(e) => { - writeln!(stdout, "Error creating conversation: {e}")?; - continue - } - }; - - *topic.write() = id.id(); - writeln!(stdout, "Set conversation to {}", topic.read())?; - let mut stdout = stdout.clone(); - let account = new_account.clone(); - let stream = chat.get_conversation_stream(id.id()).await?; - let chat = chat.clone(); - let topic = topic.clone(); - - tokio::spawn(async move { - if let Err(e) = message_event_handle( - stdout.clone(), - account.clone(), - chat.clone(), - stream, - topic.clone(), - ).await { - writeln!(stdout, ">> Error processing event task: {e}").unwrap(); - } - }); - } else if let Err(e) = chat.create_group_conversation(Some(name.to_string()), did_keys).await { - writeln!(stdout, "Error creating conversation: {e}")?; - continue + if let Err(e) = chat.create_group_conversation(Some(name.to_string()), did_keys).await { + writeln!(stdout, "Error creating conversation: {e}")?; + continue } }, Some("/remove-conversation") => { diff --git a/extensions/warp-ipfs/src/behaviour/mod.rs b/extensions/warp-ipfs/src/behaviour/mod.rs index 29fa5ee69..373911a0e 100644 --- a/extensions/warp-ipfs/src/behaviour/mod.rs +++ b/extensions/warp-ipfs/src/behaviour/mod.rs @@ -1,5 +1,5 @@ -pub mod phonebook; pub mod discovery; +pub mod phonebook; use libp2p::swarm::NetworkBehaviour; use rust_ipfs::libp2p; diff --git a/extensions/warp-ipfs/src/config.rs b/extensions/warp-ipfs/src/config.rs index 61d6a9cf2..834d798e3 100644 --- a/extensions/warp-ipfs/src/config.rs +++ b/extensions/warp-ipfs/src/config.rs @@ -8,6 +8,39 @@ use std::{ }; use warp::multipass::identity::Identity; +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum Network { + /// IPFS Public Network + #[default] + Ipfs, + /// Satellite Network + Satellite { addresses: Vec }, + /// Custom Network + Custom { addresses: Vec }, + /// No network selection. + None, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct NetworkAddress { + /// Address of the node + pub address: Multiaddr, + /// Type for the network. 
+ /// - DHT + /// - Relay + /// - RzPoint + pub network_type: Vec, +} + +#[derive(Debug, Deserialize, Serialize, Clone, Copy)] +#[serde(rename_all = "snake_case")] +pub enum NetworkType { + DHT, + RzPoint, + Relay, +} + #[derive(Default, Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] pub enum Bootstrap { @@ -232,19 +265,13 @@ pub struct StoreSetting { pub auto_push: Option, /// Discovery type pub discovery: Discovery, - #[serde(skip_serializing_if = "Vec::is_empty")] - /// Placeholder for a offline agents to obtain information regarding one own identity - pub sync: Vec, - /// Interval to push or check node - pub sync_interval: Duration, /// Fetch data over bitswap instead of pubsub pub fetch_over_bitswap: bool, /// Enables sharing platform (Desktop, Mobile, Web) information to another user pub share_platform: bool, - /// Enables phonebook service - pub use_phonebook: bool, /// Emit event for when a friend comes online or offline pub emit_online_event: bool, + #[serde(skip_serializing_if = "Option::is_none")] /// Waits for a response from peer for a specific duration pub friend_request_response_duration: Option, /// Options to allow emitting identity events to all or just friends @@ -253,23 +280,23 @@ pub struct StoreSetting { pub disable_images: bool, /// Enables spam check pub check_spam: bool, - - /// Load conversation in a separate task - /// Note: While this is loaded in a separate task, not all conversations will be made available up front. - /// If any conversations are corrupted for whatever reason they will not be made available - pub conversation_load_task: bool, - - /// Attaches recipients to the local message block - pub attach_recipients_on_storing: bool, - - /// Disables emitting an event on stream for creating a conversation - pub disable_sender_event_emit: bool, - /// Function to call to provide data for a default profile picture if one is not apart of the identity #[serde(skip)] pub default_profile_picture: Option, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum SynchronizeType { + /// Export locally to a file + Local { path: PathBuf }, + /// Export remotely to a service + Remote, + /// Export locally and remotely + RemoteLocal { path: PathBuf }, + /// Dont export + None, +} + impl std::fmt::Debug for StoreSetting { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("StoreSetting").finish() @@ -284,19 +311,13 @@ impl Default for StoreSetting { namespace: None, discovery_type: Default::default(), }, - sync: Vec::new(), - sync_interval: Duration::from_millis(1000), fetch_over_bitswap: false, share_platform: false, - use_phonebook: true, friend_request_response_duration: None, emit_online_event: false, update_events: Default::default(), disable_images: false, check_spam: true, - attach_recipients_on_storing: false, - conversation_load_task: false, - disable_sender_event_emit: false, with_friends: false, default_profile_picture: None, } @@ -307,13 +328,13 @@ impl Default for StoreSetting { pub struct Config { #[serde(skip_serializing_if = "Option::is_none")] pub path: Option, + pub network: Network, pub bootstrap: Bootstrap, #[serde(skip_serializing_if = "Vec::is_empty")] pub listen_on: Vec, pub ipfs_setting: IpfsSetting, pub store_setting: StoreSetting, pub enable_relay: bool, - pub debug: bool, pub save_phrase: bool, pub max_storage_size: Option, pub max_file_size: Option, @@ -321,12 +342,14 @@ pub struct Config { pub chunking: Option, pub thumbnail_task: bool, pub thumbnail_exact_format: bool, + pub 
synchronize_type: SynchronizeType, } impl Default for Config { fn default() -> Self { Config { path: None, + network: Network::Ipfs, bootstrap: Bootstrap::Ipfs, listen_on: ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0"] .iter() @@ -338,7 +361,6 @@ impl Default for Config { ..Default::default() }, store_setting: Default::default(), - debug: false, enable_relay: false, save_phrase: false, max_storage_size: Some(1024 * 1024 * 1024), @@ -347,6 +369,7 @@ impl Default for Config { chunking: None, thumbnail_task: false, thumbnail_exact_format: true, + synchronize_type: SynchronizeType::None, } } } diff --git a/extensions/warp-ipfs/src/lib.rs b/extensions/warp-ipfs/src/lib.rs index e1942d8ea..30342ed76 100644 --- a/extensions/warp-ipfs/src/lib.rs +++ b/extensions/warp-ipfs/src/lib.rs @@ -28,13 +28,12 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use store::document::ExtractedRootDocument; use store::files::FileStore; -use store::friends::FriendsStore; use store::identity::{IdentityStore, LookupBy}; use store::message::MessageStore; use tokio::sync::broadcast; use tokio_util::compat::TokioAsyncReadCompatExt; -use tracing::debug; use tracing::log::{error, info, warn}; +use tracing::{debug, trace}; use utils::ExtensionType; use uuid::Uuid; use warp::constellation::directory::Directory; @@ -78,7 +77,6 @@ pub struct WarpIpfs { identity_guard: Arc>, ipfs: Arc>>, tesseract: Tesseract, - friend_store: Arc>>, identity_store: Arc>>, message_store: Arc>>, file_store: Arc>>, @@ -148,7 +146,6 @@ impl WarpIpfs { config, tesseract, ipfs: Default::default(), - friend_store: Default::default(), identity_store: Default::default(), message_store: Default::default(), file_store: Default::default(), @@ -179,9 +176,7 @@ impl WarpIpfs { async fn initialize_store(&self, init: bool) -> anyhow::Result<()> { let tesseract = self.tesseract.clone(); - if init && self.identity_store.read().is_some() && self.friend_store.read().is_some() - || self.initialized.load(Ordering::SeqCst) - { + if init && self.identity_store.read().is_some() || self.initialized.load(Ordering::SeqCst) { warn!("Identity is already loaded"); anyhow::bail!(Error::IdentityExist) } @@ -456,41 +451,20 @@ impl WarpIpfs { relays.clone(), ); + info!("Initializing identity profile"); let identity_store = IdentityStore::new( ipfs.clone(), config.path.clone(), tesseract.clone(), - config.store_setting.auto_push, - self.multipass_tx.clone(), - config.store_setting.default_profile_picture.clone(), - ( - discovery.clone(), - relays, - config.store_setting.fetch_over_bitswap, - config.store_setting.share_platform, - config.store_setting.update_events, - config.store_setting.disable_images, - ), - ) - .await?; - info!("Identity store initialized"); - - let friend_store = FriendsStore::new( - ipfs.clone(), - identity_store.clone(), - discovery.clone(), - config.clone(), - tesseract.clone(), self.multipass_tx.clone(), pb_tx, + &config, + discovery.clone(), ) .await?; - info!("friends store initialized"); - - identity_store.set_friend_store(friend_store.clone()).await; + info!("Identity initialized"); *self.identity_store.write() = Some(identity_store.clone()); - *self.friend_store.write() = Some(friend_store.clone()); *self.ipfs.write() = Some(ipfs.clone()); @@ -503,7 +477,7 @@ impl WarpIpfs { ipfs.clone(), config.path.map(|path| path.join("messages")), identity_store, - friend_store, + // friend_store, discovery, Some(Box::new(self.clone()) as Box), false, @@ -511,9 +485,7 @@ impl WarpIpfs { self.raygun_tx.clone(), ( config.store_setting.check_spam, 
- config.store_setting.disable_sender_event_emit, config.store_setting.with_friends, - config.store_setting.conversation_load_task, ), ) .await @@ -526,14 +498,6 @@ impl WarpIpfs { Ok(()) } - pub(crate) async fn friend_store(&self) -> Result { - self.identity_store(true).await?; - self.friend_store - .read() - .clone() - .ok_or(Error::MultiPassExtensionUnavailable) - } - pub(crate) async fn identity_store(&self, created: bool) -> Result { let store = self.identity_store_sync()?; if created && !store.local_id_created().await { @@ -587,8 +551,8 @@ impl WarpIpfs { } pub(crate) async fn is_blocked_by(&self, pubkey: &DID) -> Result { - let friends = self.friend_store().await?; - friends.is_blocked_by(pubkey).await + let identity = self.identity_store(true).await?; + identity.is_blocked_by(pubkey).await } } @@ -722,6 +686,9 @@ impl MultiPass for WarpIpfs { maximum: Some(2 * 1024 * 1024), }); } + + trace!("image size = {}", len); + let cid = store .store_photo( futures::stream::iter(Ok::<_, std::io::Error>(Ok(serde_json::to_vec( @@ -732,8 +699,11 @@ impl MultiPass for WarpIpfs { ) .await?; + debug!("Image cid: {cid}"); + if let Some(picture_cid) = identity.profile_picture { if picture_cid == cid { + debug!("Picture is already on document. Not updating identity"); return Ok(()); } @@ -769,6 +739,8 @@ impl MultiPass for WarpIpfs { }); } + trace!("image size = {}", len); + let stream = async_stream::stream! { let mut reader = file.compat(); let mut buffer = vec![0u8; 512]; @@ -792,8 +764,11 @@ impl MultiPass for WarpIpfs { .store_photo(stream.boxed(), Some(2 * 1024 * 1024)) .await?; + debug!("Image cid: {cid}"); + if let Some(picture_cid) = identity.profile_picture { if picture_cid == cid { + debug!("Picture is already on document. Not updating identity"); return Ok(()); } @@ -825,6 +800,8 @@ impl MultiPass for WarpIpfs { }); } + trace!("image size = {}", len); + let cid = store .store_photo( futures::stream::iter(Ok::<_, std::io::Error>(Ok(serde_json::to_vec( @@ -835,8 +812,11 @@ impl MultiPass for WarpIpfs { ) .await?; + debug!("Image cid: {cid}"); + if let Some(banner_cid) = identity.profile_banner { if banner_cid == cid { + debug!("Banner is already on document. Not updating identity"); return Ok(()); } @@ -872,6 +852,8 @@ impl MultiPass for WarpIpfs { }); } + trace!("image size = {}", len); + let stream = async_stream::stream! { let mut reader = file.compat(); let mut buffer = vec![0u8; 512]; @@ -895,8 +877,11 @@ impl MultiPass for WarpIpfs { .store_photo(stream.boxed(), Some(2 * 1024 * 1024)) .await?; + debug!("Image cid: {cid}"); + if let Some(banner_cid) = identity.profile_banner { if banner_cid == cid { + debug!("Banner is already on document. 
Not updating identity"); return Ok(()); } @@ -944,8 +929,6 @@ impl MultiPass for WarpIpfs { } } - info!("Update identity store"); - store.update_identity().await?; store.push_to_all().await; Ok(()) @@ -1044,7 +1027,7 @@ impl MultiPassImportExport for WarpIpfs { let store = self.identity_store(true).await?; let kp = store.get_keypair_did()?; let ipfs = self.ipfs()?; - let document = store.get_root_document().await?; + let document = store.root_document().get().await?; let exported = document.export(&ipfs).await?; @@ -1068,77 +1051,77 @@ impl MultiPassImportExport for WarpIpfs { #[async_trait::async_trait] impl Friends for WarpIpfs { async fn send_request(&mut self, pubkey: &DID) -> Result<(), Error> { - let mut store = self.friend_store().await?; + let mut store = self.identity_store(true).await?; store.send_request(pubkey).await } async fn accept_request(&mut self, pubkey: &DID) -> Result<(), Error> { - let mut store = self.friend_store().await?; + let mut store = self.identity_store(true).await?; store.accept_request(pubkey).await } async fn deny_request(&mut self, pubkey: &DID) -> Result<(), Error> { - let mut store = self.friend_store().await?; + let mut store = self.identity_store(true).await?; store.reject_request(pubkey).await } async fn close_request(&mut self, pubkey: &DID) -> Result<(), Error> { - let mut store = self.friend_store().await?; + let mut store = self.identity_store(true).await?; store.close_request(pubkey).await } async fn list_incoming_request(&self) -> Result, Error> { - let store = self.friend_store().await?; + let store = self.identity_store(true).await?; store.list_incoming_request().await } async fn list_outgoing_request(&self) -> Result, Error> { - let store = self.friend_store().await?; + let store = self.identity_store(true).await?; store.list_outgoing_request().await } async fn received_friend_request_from(&self, did: &DID) -> Result { - let store = self.friend_store().await?; + let store = self.identity_store(true).await?; store.received_friend_request_from(did).await } async fn sent_friend_request_to(&self, did: &DID) -> Result { - let store = self.friend_store().await?; + let store = self.identity_store(true).await?; store.sent_friend_request_to(did).await } async fn remove_friend(&mut self, pubkey: &DID) -> Result<(), Error> { - let mut store = self.friend_store().await?; + let mut store = self.identity_store(true).await?; store.remove_friend(pubkey, true).await } async fn block(&mut self, pubkey: &DID) -> Result<(), Error> { - let mut store = self.friend_store().await?; + let mut store = self.identity_store(true).await?; store.block(pubkey).await } async fn is_blocked(&self, did: &DID) -> Result { - let store = self.friend_store().await?; + let store = self.identity_store(true).await?; store.is_blocked(did).await } async fn unblock(&mut self, pubkey: &DID) -> Result<(), Error> { - let mut store = self.friend_store().await?; + let mut store = self.identity_store(true).await?; store.unblock(pubkey).await } async fn block_list(&self) -> Result, Error> { - let store = self.friend_store().await?; + let store = self.identity_store(true).await?; store.block_list().await.map(Vec::from_iter) } async fn list_friends(&self) -> Result, Error> { - let store = self.friend_store().await?; + let store = self.identity_store(true).await?; store.friends_list().await.map(Vec::from_iter) } async fn has_friend(&self, pubkey: &DID) -> Result { - let store = self.friend_store().await?; + let store = self.identity_store(true).await?; store.is_friend(pubkey).await } } diff --git 
a/extensions/warp-ipfs/src/store/document.rs b/extensions/warp-ipfs/src/store/document.rs index af8caad37..32fee1a0c 100644 --- a/extensions/warp-ipfs/src/store/document.rs +++ b/extensions/warp-ipfs/src/store/document.rs @@ -1,6 +1,9 @@ pub mod identity; pub mod utils; +pub mod cache; +pub mod root; +use futures::TryFutureExt; use ipfs::{Ipfs, IpfsPath}; use libipld::{ serde::{from_ipld, to_ipld}, @@ -19,7 +22,7 @@ use crate::store::get_keypair_did; use self::{identity::IdentityDocument, utils::GetLocalDag}; -use super::friends::Request; +use super::identity::Request; #[async_trait::async_trait] pub(crate) trait ToCid: Sized { @@ -76,7 +79,7 @@ pub struct ExtractedRootDocument { pub friends: Vec, pub block_list: Vec, pub block_by_list: Vec, - pub request: Vec, + pub request: Vec, pub signature: Option>, } @@ -132,14 +135,20 @@ impl RootDocument { #[tracing::instrument(skip(self, ipfs))] pub async fn verify(&self, ipfs: &Ipfs) -> Result<(), Error> { - let (identity, _, _, _, _) = self.resolve(ipfs).await?; + let identity: IdentityDocument = self + .identity + .get_local_dag(ipfs) + .await + .map_err(|_| Error::IdentityInvalid)?; + let mut root_document = self.clone(); let signature = std::mem::take(&mut root_document.signature).ok_or(Error::InvalidSignature)?; let bytes = serde_json::to_vec(&root_document)?; let sig = bs58::decode(&signature).into_vec()?; + identity - .did_key() + .did .verify(&bytes, &sig) .map_err(|_| Error::InvalidSignature)?; Ok(()) @@ -151,38 +160,33 @@ impl RootDocument { &self, ipfs: &Ipfs, ) -> Result<(Identity, Vec, Vec, Vec, Vec), Error> { - let identity = match ipfs - .dag() - .get(IpfsPath::from(self.identity), &[], true) + let document: IdentityDocument = self + .identity + .get_local_dag(ipfs) .await - { - Ok(ipld) => from_ipld::(ipld) - .map_err(anyhow::Error::from) - .map_err(Error::from)? 
- .resolve()?, - Err(_) => return Err(Error::IdentityInvalid), - }; + .map_err(|_| Error::IdentityInvalid)?; - let mut friends = Default::default(); - let mut block_list = Default::default(); - let mut block_by_list = Default::default(); - let mut request = Default::default(); + let identity = document.resolve()?; - if let Some(document) = &self.friends { - friends = document.get_local_dag(ipfs).await.unwrap_or_default(); - } + let friends = futures::future::ready(self.friends.ok_or(Error::Other)) + .and_then(|document| async move { document.get_local_dag(ipfs).await }) + .await + .unwrap_or_default(); - if let Some(document) = &self.blocks { - block_list = document.get_local_dag(ipfs).await.unwrap_or_default(); - } + let block_list = futures::future::ready(self.blocks.ok_or(Error::Other)) + .and_then(|document| async move { document.get_local_dag(ipfs).await }) + .await + .unwrap_or_default(); - if let Some(document) = &self.block_by { - block_by_list = document.get_local_dag(ipfs).await.unwrap_or_default(); - } + let block_by_list = futures::future::ready(self.block_by.ok_or(Error::Other)) + .and_then(|document| async move { document.get_local_dag(ipfs).await }) + .await + .unwrap_or_default(); - if let Some(document) = &self.request { - request = document.get_local_dag(ipfs).await.unwrap_or_default(); - } + let request = futures::future::ready(self.request.ok_or(Error::Other)) + .and_then(|document| async move { document.get_local_dag(ipfs).await }) + .await + .unwrap_or_default(); Ok((identity, friends, block_list, block_by_list, request)) } diff --git a/extensions/warp-ipfs/src/store/document/cache.rs b/extensions/warp-ipfs/src/store/document/cache.rs new file mode 100644 index 000000000..52cf8863d --- /dev/null +++ b/extensions/warp-ipfs/src/store/document/cache.rs @@ -0,0 +1,331 @@ +use std::{collections::HashSet, path::PathBuf, sync::Arc}; + +use futures::{ + channel::{ + mpsc::{Receiver, Sender}, + oneshot::Sender as OneshotSender, + }, + SinkExt, StreamExt, +}; +use libipld::Cid; +use rust_ipfs::Ipfs; +use warp::{crypto::DID, error::Error}; + +use super::{identity::IdentityDocument, utils::GetLocalDag, ToCid}; + +#[allow(clippy::large_enum_variant)] +enum IdentityCacheCommand { + Insert { + document: IdentityDocument, + response: OneshotSender, Error>>, + }, + Get { + did: DID, + response: OneshotSender>, + }, + Remove { + did: DID, + response: OneshotSender>, + }, + List { + response: OneshotSender, Error>>, + }, +} + +#[derive(Debug, Clone)] +pub struct IdentityCache { + tx: Sender, + task: Arc>, +} + +impl Drop for IdentityCache { + fn drop(&mut self) { + if Arc::strong_count(&self.task) == 1 && !self.task.is_finished() { + self.task.abort(); + } + } +} + +impl IdentityCache { + pub async fn new(ipfs: &Ipfs, path: Option) -> Self { + let list = match path.as_ref() { + Some(path) => tokio::fs::read(path.join(".cache_id")) + .await + .map(|bytes| String::from_utf8_lossy(&bytes).to_string()) + .ok() + .and_then(|cid_str| cid_str.parse().ok()), + None => None, + }; + + let (tx, rx) = futures::channel::mpsc::channel(1); + + let mut task = IdentityCacheTask { + ipfs: ipfs.clone(), + path, + list, + rx, + }; + + let handle = tokio::spawn(async move { + task.start().await; + }); + + Self { + tx, + task: Arc::new(handle), + } + } + + pub async fn insert( + &self, + document: &IdentityDocument, + ) -> Result, Error> { + let (tx, rx) = futures::channel::oneshot::channel(); + + let _ = self + .tx + .clone() + .send(IdentityCacheCommand::Insert { + document: document.clone(), + response: 
tx, + }) + .await; + + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn get(&self, did: &DID) -> Result { + let (tx, rx) = futures::channel::oneshot::channel(); + + let _ = self + .tx + .clone() + .send(IdentityCacheCommand::Get { + did: did.clone(), + response: tx, + }) + .await; + + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn remove(&self, did: &DID) -> Result<(), Error> { + let (tx, rx) = futures::channel::oneshot::channel(); + + let _ = self + .tx + .clone() + .send(IdentityCacheCommand::Remove { + did: did.clone(), + response: tx, + }) + .await; + + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn list(&self) -> Result, Error> { + let (tx, rx) = futures::channel::oneshot::channel(); + + let _ = self + .tx + .clone() + .send(IdentityCacheCommand::List { response: tx }) + .await; + + rx.await.map_err(anyhow::Error::from)? + } +} + +struct IdentityCacheTask { + pub ipfs: Ipfs, + pub path: Option, + pub list: Option, + rx: Receiver, +} + +impl IdentityCacheTask { + pub async fn start(&mut self) { + while let Some(command) = self.rx.next().await { + match command { + IdentityCacheCommand::Insert { document, response } => { + if let Err(e) = document.verify() { + let _ = response.send(Err(e)); + continue; + } + + let mut list: HashSet = match self.list { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => HashSet::new(), + }; + + let old_document = list + .iter() + .find(|old_doc| { + document.did == old_doc.did && document.short_id == old_doc.short_id + }) + .cloned(); + + match old_document { + Some(old_document) => { + if !old_document.different(&document) { + let _ = response.send(Err(Error::IdentityExist)); + continue; + } + + list.replace(document); + + let cid = match list.to_cid(&self.ipfs).await { + Ok(cid) => cid, + Err(e) => { + let _ = response.send(Err(e)); + continue; + } + }; + + let old_cid = self.list.take(); + + let remove_pin_and_block = async { + if let Some(old_cid) = old_cid { + if self.ipfs.is_pinned(&old_cid).await? { + self.ipfs.remove_pin(&old_cid, false).await?; + } + // Do we want to remove the old block? 
+ self.ipfs.remove_block(old_cid).await?; + } + Ok::<_, Error>(()) + }; + + if let Err(e) = remove_pin_and_block.await { + let _ = response.send(Err(e)); + continue; + } + + if let Some(path) = self.path.as_ref() { + let cid = cid.to_string(); + if let Err(e) = tokio::fs::write(path.join(".cache_id"), cid).await + { + tracing::log::error!("Error writing cid to file: {e}"); + } + } + + self.list = Some(cid); + + let _ = response.send(Ok(Some(old_document.clone()))); + } + None => { + list.insert(document); + + let cid = match list.to_cid(&self.ipfs).await { + Ok(cid) => cid, + Err(e) => { + let _ = response.send(Err(e)); + continue; + } + }; + + if let Err(e) = self.ipfs.insert_pin(&cid, false).await { + let _ = response.send(Err(e.into())); + continue; + } + + if let Some(path) = self.path.as_ref() { + let cid = cid.to_string(); + if let Err(e) = tokio::fs::write(path.join(".cache_id"), cid).await + { + tracing::log::error!("Error writing cid to file: {e}"); + } + } + + self.list = Some(cid); + + let _ = response.send(Ok(None)); + } + } + } + IdentityCacheCommand::Get { did, response } => { + let list: HashSet = match self.list { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => HashSet::new(), + }; + + let document = list + .iter() + .find(|document| document.did == did) + .cloned() + .ok_or(Error::IdentityDoesntExist); + + let _ = response.send(document); + } + IdentityCacheCommand::Remove { did, response } => { + let mut list: HashSet = match self.list { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => { + let _ = response.send(Err(Error::IdentityDoesntExist)); + continue; + } + }; + + let old_document = list.iter().find(|document| document.did == did).cloned(); + + if old_document.is_none() { + let _ = response.send(Err(Error::IdentityDoesntExist)); + continue; + } + + let document = old_document.expect("Exist"); + + if !list.remove(&document) { + let _ = response.send(Err(Error::IdentityDoesntExist)); + continue; + } + + let cid = match list.to_cid(&self.ipfs).await { + Ok(cid) => cid, + Err(e) => { + let _ = response.send(Err(e)); + continue; + } + }; + + let old_cid = self.list.take(); + + let remove_pin_and_block = async { + if let Some(old_cid) = old_cid { + if self.ipfs.is_pinned(&old_cid).await? { + self.ipfs.remove_pin(&old_cid, false).await?; + } + // Do we want to remove the old block? 
+ self.ipfs.remove_block(old_cid).await?; + } + Ok::<_, Error>(()) + }; + + if let Err(e) = remove_pin_and_block.await { + let _ = response.send(Err(e)); + continue; + } + + if let Some(path) = self.path.as_ref() { + let cid = cid.to_string(); + if let Err(e) = tokio::fs::write(path.join(".cache_id"), cid).await { + tracing::log::error!("Error writing cid to file: {e}"); + } + } + + self.list = Some(cid); + + let _ = response.send(Ok(())); + } + IdentityCacheCommand::List { response } => { + let list: HashSet = match self.list { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => HashSet::new(), + }; + + let _ = response.send(Ok(Vec::from_iter(list))); + } + } + } + } +} diff --git a/extensions/warp-ipfs/src/store/document/identity.rs b/extensions/warp-ipfs/src/store/document/identity.rs index e4a69c035..1105b7102 100644 --- a/extensions/warp-ipfs/src/store/document/identity.rs +++ b/extensions/warp-ipfs/src/store/document/identity.rs @@ -1,10 +1,10 @@ -use futures::StreamExt; +use futures::{StreamExt, TryStreamExt}; use libipld::Cid; use rust_ipfs::{Ipfs, IpfsPath}; use serde::{Deserialize, Serialize}; use std::{hash::Hash, time::Duration}; use warp::{ - crypto::{did_key::CoreSign, DID}, + crypto::{did_key::CoreSign, Fingerprint, DID}, error::Error, multipass::identity::{Identity, IdentityStatus, Platform, SHORT_ID_SIZE}, }; @@ -123,7 +123,45 @@ impl IdentityDocument { pub fn verify(&self) -> Result<(), Error> { let mut payload = self.clone(); - //TODO: Validate username, short id, and status message + if payload.username.is_empty() { + return Err(Error::IdentityInvalid); //TODO: Invalid username + } + + if !(4..=64).contains(&payload.username.len()) { + return Err(Error::InvalidLength { + context: "username".into(), + current: payload.username.len(), + minimum: Some(4), + maximum: Some(64), + }); + } + + if payload.short_id.is_empty() { + return Err(Error::IdentityInvalid); //TODO: Invalid short id + } + + let fingerprint = payload.did.fingerprint(); + + let bytes = fingerprint.as_bytes(); + + let short_id: [u8; SHORT_ID_SIZE] = bytes[bytes.len() - SHORT_ID_SIZE..] + .try_into() + .map_err(anyhow::Error::from)?; + + if payload.short_id != short_id { + return Err(Error::IdentityInvalid); //TODO: Invalid short id + } + + if let Some(status) = &payload.status_message { + if status.len() > 256 { + return Err(Error::InvalidLength { + context: "identity status message".into(), + current: status.len(), + minimum: None, + maximum: Some(256), + }); + } + } let signature = std::mem::take(&mut payload.signature).ok_or(Error::InvalidSignature)?; let signature_bytes = bs58::decode(signature).into_vec()?; @@ -142,47 +180,31 @@ pub async fn unixfs_fetch( local: bool, limit: Option, ) -> Result, Error> { - let fut = async { - let stream = ipfs - .unixfs() - .cat(IpfsPath::from(cid), None, &[], local, None) - .await - .map_err(anyhow::Error::from)?; + let timeout = timeout.or(Some(std::time::Duration::from_secs(15))); - futures::pin_mut!(stream); + let mut stream = ipfs + .unixfs() + .cat(IpfsPath::from(cid), None, &[], local, timeout) + .await + .map_err(anyhow::Error::from)? 
+ .boxed(); - let mut data = vec![]; + let mut data = vec![]; - while let Some(stream) = stream.next().await { - if let Some(limit) = limit { - if data.len() >= limit { - return Err(Error::InvalidLength { - context: "data".into(), - current: data.len(), - minimum: None, - maximum: Some(limit), - }); - } - } - match stream { - Ok(bytes) => { - data.extend(bytes); - } - Err(e) => return Err(Error::from(anyhow::anyhow!("{e}"))), - } - } - Ok(data) - }; - - match local { - true => fut.await, - false => { - let timeout = timeout.unwrap_or(std::time::Duration::from_secs(15)); - match tokio::time::timeout(timeout, fut).await { - Ok(Ok(data)) => serde_json::from_slice(&data).map_err(Error::from), - Ok(Err(e)) => Err(e), - Err(e) => Err(Error::from(anyhow::anyhow!("Timeout at {e}"))), - } + while let Some(stream) = stream.try_next().await.map_err(anyhow::Error::from)? { + data.extend(stream); + } + + if let Some(limit) = limit { + if data.len() > limit { + return Err(Error::InvalidLength { + context: "data".into(), + current: data.len(), + minimum: None, + maximum: Some(limit), + }); } } + + Ok(data) } diff --git a/extensions/warp-ipfs/src/store/document/root.rs b/extensions/warp-ipfs/src/store/document/root.rs new file mode 100644 index 000000000..216452b14 --- /dev/null +++ b/extensions/warp-ipfs/src/store/document/root.rs @@ -0,0 +1,625 @@ +use std::{path::PathBuf, sync::Arc}; + +use futures::{ + channel::{mpsc::Receiver, oneshot}, + SinkExt, StreamExt, +}; +use libipld::Cid; +use rust_ipfs::{Ipfs, IpfsPath}; +use warp::{crypto::DID, error::Error}; + +use crate::store::{identity::Request, VecExt}; + +use super::{ + utils::{GetLocalDag, ToCid}, + RootDocument, +}; + +#[allow(clippy::large_enum_variant)] +pub enum RootDocumentCommand { + Get { + response: oneshot::Sender>, + }, + Set { + document: RootDocument, + response: oneshot::Sender>, + }, + AddFriend { + did: DID, + response: oneshot::Sender>, + }, + RemoveFriend { + did: DID, + response: oneshot::Sender>, + }, + GetFriendList { + response: oneshot::Sender, Error>>, + }, + AddRequest { + request: Request, + response: oneshot::Sender>, + }, + RemoveRequest { + request: Request, + response: oneshot::Sender>, + }, + GetRequestList { + response: oneshot::Sender, Error>>, + }, + AddBlock { + did: DID, + response: oneshot::Sender>, + }, + RemoveBlock { + did: DID, + response: oneshot::Sender>, + }, + GetBlockList { + response: oneshot::Sender, Error>>, + }, + AddBlockBy { + did: DID, + response: oneshot::Sender>, + }, + RemoveBlockBy { + did: DID, + response: oneshot::Sender>, + }, + GetBlockByList { + response: oneshot::Sender, Error>>, + }, +} + +#[derive(Debug, Clone)] +pub struct RootDocumentMap { + tx: futures::channel::mpsc::Sender, + task: Arc>, +} + +impl Drop for RootDocumentMap { + fn drop(&mut self) { + if Arc::strong_count(&self.task) == 1 && !self.task.is_finished() { + self.task.abort(); + } + } +} + +impl RootDocumentMap { + pub async fn new(ipfs: &Ipfs, keypair: Arc, path: Option) -> Self { + let cid = match path.as_ref() { + Some(path) => tokio::fs::read(path.join(".id")) + .await + .map(|bytes| String::from_utf8_lossy(&bytes).to_string()) + .ok() + .and_then(|cid_str| cid_str.parse().ok()), + None => None, + }; + + let (tx, rx) = futures::channel::mpsc::channel(1); + + let mut task = RootDocumentTask { + ipfs: ipfs.clone(), + keypair, + path, + cid, + rx, + }; + + let handle = tokio::spawn(async move { + task.start().await; + }); + + Self { + tx, + task: Arc::new(handle), + } + } + + pub async fn get(&self) -> Result { + let 
(tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::Get { response: tx }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn set(&mut self, document: RootDocument) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::Set { + document, + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn add_friend(&self, did: &DID) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::AddFriend { + did: did.clone(), + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn remove_friend(&self, did: &DID) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::RemoveFriend { + did: did.clone(), + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn add_block(&self, did: &DID) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::AddBlock { + did: did.clone(), + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn remove_block(&self, did: &DID) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::RemoveBlock { + did: did.clone(), + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn add_block_by(&self, did: &DID) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::AddBlockBy { + did: did.clone(), + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn remove_block_by(&self, did: &DID) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::RemoveBlockBy { + did: did.clone(), + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn add_request(&self, request: &Request) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::AddRequest { + request: request.clone(), + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn remove_request(&self, request: &Request) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::RemoveRequest { + request: request.clone(), + response: tx, + }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn get_friends(&self) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::GetFriendList { response: tx }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn get_requests(&self) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::GetRequestList { response: tx }) + .await; + rx.await.map_err(anyhow::Error::from)? + } + + pub async fn get_blocks(&self) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::GetBlockList { response: tx }) + .await; + rx.await.map_err(anyhow::Error::from)? 
+ } + + pub async fn get_block_by(&self) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + let _ = self + .tx + .clone() + .send(RootDocumentCommand::GetBlockByList { response: tx }) + .await; + rx.await.map_err(anyhow::Error::from)? + } +} + +struct RootDocumentTask { + keypair: Arc, + path: Option, + ipfs: Ipfs, + cid: Option, + rx: Receiver, +} + +impl RootDocumentTask { + pub async fn start(&mut self) { + while let Some(command) = self.rx.next().await { + match command { + RootDocumentCommand::Get { response } => { + let _ = response.send(self.get_root_document().await); + } + RootDocumentCommand::Set { document, response } => { + let _ = response.send(self.set_root_document(document).await); + } + RootDocumentCommand::AddFriend { did, response } => { + let _ = response.send(self.add_friend(did).await); + } + RootDocumentCommand::RemoveFriend { did, response } => { + let _ = response.send(self.remove_friend(did).await); + } + RootDocumentCommand::GetFriendList { response } => { + let _ = response.send(self.friend_list().await); + } + RootDocumentCommand::AddRequest { request, response } => { + let _ = response.send(self.add_request(request).await); + } + RootDocumentCommand::RemoveRequest { request, response } => { + let _ = response.send(self.remove_request(request).await); + } + RootDocumentCommand::GetRequestList { response } => { + let _ = response.send(self.request_list().await); + } + RootDocumentCommand::AddBlock { did, response } => { + let _ = response.send(self.block_key(did).await); + } + RootDocumentCommand::RemoveBlock { did, response } => { + let _ = response.send(self.unblock_key(did).await); + } + RootDocumentCommand::GetBlockList { response } => { + let _ = response.send(self.block_list().await); + } + RootDocumentCommand::AddBlockBy { did, response } => { + let _ = response.send(self.add_blockby_key(did).await); + } + RootDocumentCommand::RemoveBlockBy { did, response } => { + let _ = response.send(self.remove_blockby_key(did).await); + } + RootDocumentCommand::GetBlockByList { response } => { + let _ = response.send(self.blockby_list().await); + } + } + } + } +} + +impl RootDocumentTask { + async fn get_root_document(&self) -> Result { + let document: RootDocument = match self.cid { + Some(cid) => cid.get_local_dag(&self.ipfs).await?, + None => return Err(Error::Other), + }; + + document.verify(&self.ipfs).await?; + + Ok(document) + } + + async fn set_root_document(&mut self, mut document: RootDocument) -> Result<(), Error> { + let old_cid = self.cid; + + document.sign(&self.keypair)?; + + //Precautionary check + document.verify(&self.ipfs).await?; + + let root_cid = document.to_cid(&self.ipfs).await?; + if !self.ipfs.is_pinned(&root_cid).await? { + self.ipfs.insert_pin(&root_cid, true).await?; + } + + if let Some(old_cid) = old_cid { + if old_cid != root_cid { + if self.ipfs.is_pinned(&old_cid).await? 
{ + self.ipfs.remove_pin(&old_cid, true).await?; + } + self.ipfs.remove_block(old_cid).await?; + } + } + + if let Some(path) = self.path.as_ref() { + let cid = root_cid.to_string(); + if let Err(e) = tokio::fs::write(path.join(".id"), cid).await { + tracing::log::error!("Error writing to '.id': {e}.") + } + } + + self.cid = Some(root_cid); + Ok(()) + } + + async fn request_list(&self) -> Result, Error> { + let cid = match self.cid { + Some(cid) => cid, + None => return Ok(vec![]), + }; + let path = IpfsPath::from(cid).sub_path("request")?; + let list: Vec = path.get_local_dag(&self.ipfs).await.unwrap_or_default(); + Ok(list) + } + + async fn add_request(&mut self, request: Request) -> Result<(), Error> { + let mut document = self.get_root_document().await?; + let old_document = document.request; + let mut list: Vec = match document.request { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => vec![], + }; + + if !list.insert_item(&request) { + return Err(Error::FriendRequestExist); + } + + document.request = (!list.is_empty()).then_some(list.to_cid(&self.ipfs).await?); + + self.set_root_document(document).await?; + + if let Some(cid) = old_document { + if !self.ipfs.is_pinned(&cid).await? { + self.ipfs.remove_block(cid).await?; + } + } + + Ok(()) + } + + async fn remove_request(&mut self, request: Request) -> Result<(), Error> { + let mut document = self.get_root_document().await?; + let old_document = document.request; + let mut list: Vec = match document.request { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => vec![], + }; + + if !list.remove_item(&request) { + return Err(Error::FriendRequestExist); + } + + document.request = (!list.is_empty()).then_some(list.to_cid(&self.ipfs).await?); + + self.set_root_document(document).await?; + + if let Some(cid) = old_document { + if !self.ipfs.is_pinned(&cid).await? { + self.ipfs.remove_block(cid).await?; + } + } + + Ok(()) + } + + async fn friend_list(&self) -> Result, Error> { + let cid = match self.cid { + Some(cid) => cid, + None => return Ok(vec![]), + }; + let path = IpfsPath::from(cid).sub_path("friends")?; + let list: Vec = path.get_local_dag(&self.ipfs).await.unwrap_or_default(); + Ok(list) + } + + async fn add_friend(&mut self, did: DID) -> Result<(), Error> { + let mut document = self.get_root_document().await?; + let old_document = document.friends; + let mut list: Vec = match document.friends { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => vec![], + }; + + if !list.insert_item(&did) { + return Err::<_, Error>(Error::FriendExist); + } + + document.friends = (!list.is_empty()).then_some(list.to_cid(&self.ipfs).await?); + + self.set_root_document(document).await?; + + if let Some(cid) = old_document { + if !self.ipfs.is_pinned(&cid).await? { + self.ipfs.remove_block(cid).await?; + } + } + + Ok(()) + } + + async fn remove_friend(&mut self, did: DID) -> Result<(), Error> { + let mut document = self.get_root_document().await?; + let old_document = document.friends; + let mut list: Vec = match document.friends { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => vec![], + }; + + if !list.remove_item(&did) { + return Err::<_, Error>(Error::FriendDoesntExist); + } + + document.friends = (!list.is_empty()).then_some(list.to_cid(&self.ipfs).await?); + + self.set_root_document(document).await?; + + if let Some(cid) = old_document { + if !self.ipfs.is_pinned(&cid).await? 
{ + self.ipfs.remove_block(cid).await?; + } + } + + Ok(()) + } + + async fn block_list(&self) -> Result, Error> { + let cid = match self.cid { + Some(cid) => cid, + None => return Ok(vec![]), + }; + let path = IpfsPath::from(cid).sub_path("blocks")?; + let list: Vec = path.get_local_dag(&self.ipfs).await.unwrap_or_default(); + Ok(list) + } + + async fn block_key(&mut self, did: DID) -> Result<(), Error> { + let mut document = self.get_root_document().await?; + let old_document = document.blocks; + let mut list: Vec = match document.blocks { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => vec![], + }; + + if !list.insert_item(&did) { + return Err::<_, Error>(Error::PublicKeyIsBlocked); + } + + document.blocks = (!list.is_empty()).then_some(list.to_cid(&self.ipfs).await?); + + self.set_root_document(document).await?; + + if let Some(cid) = old_document { + if !self.ipfs.is_pinned(&cid).await? { + self.ipfs.remove_block(cid).await?; + } + } + Ok(()) + } + + async fn unblock_key(&mut self, did: DID) -> Result<(), Error> { + let mut document = self.get_root_document().await?; + let old_document = document.blocks; + let mut list: Vec = match document.blocks { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => vec![], + }; + + if !list.remove_item(&did) { + return Err::<_, Error>(Error::PublicKeyIsntBlocked); + } + + document.blocks = (!list.is_empty()).then_some(list.to_cid(&self.ipfs).await?); + + self.set_root_document(document).await?; + + if let Some(cid) = old_document { + if !self.ipfs.is_pinned(&cid).await? { + self.ipfs.remove_block(cid).await?; + } + } + Ok(()) + } + + async fn blockby_list(&self) -> Result, Error> { + let cid = match self.cid { + Some(cid) => cid, + None => return Ok(vec![]), + }; + let path = IpfsPath::from(cid).sub_path("block_by")?; + let list: Vec = path.get_local_dag(&self.ipfs).await.unwrap_or_default(); + Ok(list) + } + + async fn add_blockby_key(&mut self, did: DID) -> Result<(), Error> { + let mut document = self.get_root_document().await?; + let old_document = document.block_by; + let mut list: Vec = match document.block_by { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => vec![], + }; + + if !list.insert_item(&did) { + return Err::<_, Error>(Error::PublicKeyIsntBlocked); + } + + document.block_by = (!list.is_empty()).then_some(list.to_cid(&self.ipfs).await?); + + self.set_root_document(document).await?; + + if let Some(cid) = old_document { + if !self.ipfs.is_pinned(&cid).await? { + self.ipfs.remove_block(cid).await?; + } + } + Ok(()) + } + + async fn remove_blockby_key(&mut self, did: DID) -> Result<(), Error> { + let mut document = self.get_root_document().await?; + let old_document = document.block_by; + let mut list: Vec = match document.block_by { + Some(cid) => cid.get_local_dag(&self.ipfs).await.unwrap_or_default(), + None => vec![], + }; + + if !list.remove_item(&did) { + return Err::<_, Error>(Error::PublicKeyIsntBlocked); + } + + document.block_by = (!list.is_empty()).then_some(list.to_cid(&self.ipfs).await?); + + self.set_root_document(document).await?; + + if let Some(cid) = old_document { + if !self.ipfs.is_pinned(&cid).await? 
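// Note: `blocks` above is the set of keys this identity has blocked, while
// `block_by` tracks keys that have blocked this identity; the
// add_blockby_key / remove_blockby_key commands are driven by inbound
// Event::Block / Event::Unblock messages (see check_request_message further
// down) rather than by local user action.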
{ + self.ipfs.remove_block(cid).await?; + } + } + Ok(()) + } +} diff --git a/extensions/warp-ipfs/src/store/files.rs b/extensions/warp-ipfs/src/store/files.rs index 26800f6d2..3b407bd6d 100644 --- a/extensions/warp-ipfs/src/store/files.rs +++ b/extensions/warp-ipfs/src/store/files.rs @@ -4,7 +4,7 @@ use chrono::{DateTime, Utc}; use futures::{ pin_mut, stream::{self, BoxStream}, - StreamExt, + StreamExt, TryStreamExt, }; use libipld::Cid; use rust_ipfs::{ @@ -21,7 +21,11 @@ use warp::{ sync::RwLock, }; -use crate::{config::Config, thumbnail::ThumbnailGenerator, to_file_type}; +use crate::{ + config::{self, Config}, + thumbnail::ThumbnailGenerator, + to_file_type, +}; use super::{ecdh_decrypt, ecdh_encrypt, get_keypair_did}; @@ -39,11 +43,7 @@ pub struct FileStore { ipfs: Ipfs, constellation_tx: broadcast::Sender, - max_storage_size: Option, - max_file_size: Option, - thumbnail_size: (u32, u32), - thumbnail_task: bool, - thumbnail_exact_format: bool, + config: config::Config, } impl FileStore { @@ -72,17 +72,14 @@ impl FileStore { } } + let config = config.clone(); + let index = Directory::new("root"); let path = Arc::default(); let modified = Utc::now(); let index_cid = Arc::new(RwLock::new(index_cid)); let thumbnail_store = ThumbnailGenerator::default(); - let max_storage_size = config.max_storage_size; - let max_file_size = config.max_file_size; - let thumbnail_size = config.thumbnail_size; - let thumbnail_exact_format = config.thumbnail_exact_format; - let thumbnail_task = config.thumbnail_task; let location_path = config.path.clone(); let store = FileStore { @@ -94,11 +91,7 @@ impl FileStore { location_path, ipfs, constellation_tx, - max_storage_size, - max_file_size, - thumbnail_size, - thumbnail_exact_format, - thumbnail_task, + config, }; if let Err(e) = store.import().await { @@ -121,33 +114,35 @@ impl FileStore { } async fn import(&self) -> Result<(), Error> { - if self.location_path.is_some() { - let cid = (*self.index_cid.read()).ok_or(Error::Other)?; + if self.config.path.is_none() { + return Ok(()); + } - let mut index_stream = self - .ipfs - .unixfs() - .cat(IpfsPath::from(cid), None, &[], true, None) - .await - .map_err(anyhow::Error::from)? - .boxed(); + let cid = (*self.index_cid.read()).ok_or(Error::Other)?; - let mut data = vec![]; + let mut index_stream = self + .ipfs + .unixfs() + .cat(IpfsPath::from(cid), None, &[], true, None) + .await + .map_err(anyhow::Error::from)? + .boxed(); - while let Some(result) = index_stream.next().await { - let mut bytes = result.map_err(anyhow::Error::from)?; - data.append(&mut bytes); - } + let mut data = vec![]; - let key = self.ipfs.keypair().and_then(get_keypair_did)?; + while let Some(bytes) = index_stream.try_next().await.map_err(anyhow::Error::from)? 
{ + data.extend(bytes); + } + + let key = self.ipfs.keypair().and_then(get_keypair_did)?; - let index_bytes = ecdh_decrypt(&key, None, data)?; + let index_bytes = ecdh_decrypt(&key, None, data)?; - let mut directory_index: Directory = serde_json::from_slice(&index_bytes)?; - directory_index.rebuild_paths(); + let mut directory_index: Directory = serde_json::from_slice(&index_bytes)?; + directory_index.rebuild_paths(); + + self.index.set_items(directory_index.get_items()); - self.index.set_items(directory_index.get_items()); - } Ok(()) } @@ -241,7 +236,7 @@ impl FileStore { } } - if let Some(path) = self.location_path.as_ref() { + if let Some(path) = self.config.path.as_ref() { if let Err(_e) = tokio::fs::write(path.join(".index_id"), cid.to_string()).await { tracing::error!("Error writing index: {_e}"); } @@ -281,7 +276,7 @@ impl FileStore { } pub fn max_size(&self) -> usize { - self.max_storage_size.unwrap_or(1024 * 1024 * 1024) + self.config.max_storage_size.unwrap_or(1024 * 1024 * 1024) } pub async fn put( @@ -327,14 +322,17 @@ impl FileStore { return Err(Error::FileExist); } - let ((width, height), exact) = (self.thumbnail_size, self.thumbnail_exact_format); + let ((width, height), exact) = ( + self.config.thumbnail_size, + self.config.thumbnail_exact_format, + ); let ticket = self .thumbnail_store .insert(&path, width, height, exact) .await?; - let background = self.thumbnail_task; + let background = self.config.thumbnail_task; let name = name.to_string(); let fs = self.clone(); @@ -518,7 +516,10 @@ impl FileStore { return Err(Error::FileExist); } - let ((width, height), exact) = (self.thumbnail_size, self.thumbnail_exact_format); + let ((width, height), exact) = ( + self.config.thumbnail_size, + self.config.thumbnail_exact_format, + ); let ticket = self .thumbnail_store @@ -932,7 +933,10 @@ impl FileStore { buffer.extend(bytes); } - let ((width, height), exact) = (self.thumbnail_size, self.thumbnail_exact_format); + let ((width, height), exact) = ( + self.config.thumbnail_size, + self.config.thumbnail_exact_format, + ); // Generate the thumbnail for the file let id = self diff --git a/extensions/warp-ipfs/src/store/friends.rs b/extensions/warp-ipfs/src/store/friends.rs deleted file mode 100644 index 34548ba6d..000000000 --- a/extensions/warp-ipfs/src/store/friends.rs +++ /dev/null @@ -1,1058 +0,0 @@ -#![allow(clippy::await_holding_lock)] -use futures::channel::oneshot; -use futures::StreamExt; -use ipfs::{Ipfs, PeerId}; -use rust_ipfs as ipfs; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::time::{Duration, Instant}; -use tokio::sync::{broadcast, RwLock}; -use tracing::log::{self, error, warn}; -use warp::crypto::DID; -use warp::error::Error; -use warp::multipass::MultiPassEventKind; -use warp::sync::Arc; - -use warp::tesseract::Tesseract; - -use crate::behaviour::phonebook::PhoneBookCommand; -use crate::config::Config as MpIpfsConfig; -use crate::store::{ecdh_decrypt, ecdh_encrypt, PeerIdExt, PeerTopic}; - -use super::identity::{IdentityStore, LookupBy, RequestOption}; -use super::phonebook::PhoneBook; -use super::queue::Queue; -use super::{did_keypair, did_to_libp2p_pub, discovery, libp2p_pub_to_did}; - -#[allow(clippy::type_complexity)] -#[derive(Clone)] -pub struct FriendsStore { - ipfs: Ipfs, - - // Identity Store - identity: IdentityStore, - - discovery: discovery::Discovery, - - // keypair - did_key: Arc, - - // Queue to handle sending friend request - queue: Queue, - - phonebook: Option, - - wait_on_response: Option, - - signal: Arc>>>>, - - 
tx: broadcast::Sender, -} - -#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)] -pub enum Request { - In(DID), - Out(DID), -} - -impl From for RequestType { - fn from(request: Request) -> Self { - RequestType::from(&request) - } -} - -impl From<&Request> for RequestType { - fn from(request: &Request) -> Self { - match request { - Request::In(_) => RequestType::Incoming, - Request::Out(_) => RequestType::Outgoing, - } - } -} - -impl Request { - pub fn r#type(&self) -> RequestType { - self.into() - } - - pub fn did(&self) -> &DID { - match self { - Request::In(did) => did, - Request::Out(did) => did, - } - } -} - -#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq, Hash, Eq)] -#[serde(rename_all = "lowercase", tag = "type")] -pub enum Event { - /// Event indicating a friend request - Request, - /// Event accepting the request - Accept, - /// Remove identity as a friend - Remove, - /// Reject friend request, if any - Reject, - /// Retract a sent friend request - Retract, - /// Block user - Block, - /// Unblock user - Unblock, - /// Indiciation of a response to a request - Response, -} - -#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Hash, Eq)] -pub struct RequestResponsePayload { - pub sender: DID, - pub event: Event, -} - -#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq, Eq)] -pub enum RequestType { - Incoming, - Outgoing, -} - -impl FriendsStore { - pub async fn new( - ipfs: Ipfs, - identity: IdentityStore, - discovery: discovery::Discovery, - config: MpIpfsConfig, - tesseract: Tesseract, - tx: broadcast::Sender, - pb_tx: futures::channel::mpsc::Sender, - ) -> anyhow::Result { - let did_key = Arc::new(did_keypair(&tesseract)?); - - let queue = Queue::new( - ipfs.clone(), - did_key.clone(), - config.path.clone(), - discovery.clone(), - ); - - let phonebook = config.store_setting.use_phonebook.then_some(PhoneBook::new( - ipfs.clone(), - discovery.clone(), - tx.clone(), - config.store_setting.emit_online_event, - pb_tx, - )); - - let signal = Default::default(); - let wait_on_response = config.store_setting.friend_request_response_duration; - let store = Self { - ipfs, - identity, - discovery, - did_key, - queue, - phonebook, - tx, - signal, - wait_on_response, - }; - - tokio::spawn({ - let mut store = store.clone(); - async move { - log::info!("Loading queue"); - tokio::spawn({ - let store = store.clone(); - async move { if let Err(_e) = store.queue.load().await {} } - }); - - if let Some(phonebook) = store.phonebook.as_ref() { - log::info!("Loading friends list into phonebook"); - if let Ok(friends) = store.friends_list().await { - if let Err(_e) = phonebook.add_friend_list(friends).await { - error!("Error adding friends in phonebook: {_e}"); - } - } - } - - // autoban the blocklist - // TODO: implement configuration open to autoban at the libp2p level - // Note: If this is done, we would need to attempt reconnection if the user - // is ever unblocked - // match store.block_list().await { - // Ok(list) => { - // for pubkey in list { - // if let Ok(peer_id) = did_to_libp2p_pub(&pubkey).map(|p| p.to_peer_id()) - // { - // if let Err(e) = store.ipfs.ban_peer(peer_id).await { - // error!("Error banning peer: {e}"); - // } - // } - // } - // } - // Err(e) => { - // error!("Error loading block list: {e}"); - // } - // }; - - // scan through friends list to see if there is any incoming request or outgoing request matching - // and clear them out of the request list as a precautionary measure - tokio::spawn({ - let store = store.clone(); - 
async move { - let friends = match store.friends_list().await { - Ok(list) => list, - _ => return, - }; - - for friend in friends.iter() { - let list = store.list_all_raw_request().await.unwrap_or_default(); - - // cleanup outgoing - for req in list.iter().filter(|req| req.did().eq(friend)) { - let _ = store.identity.root_document_remove_request(req).await.ok(); - } - } - } - }); - - let stream = match store.ipfs.pubsub_subscribe(store.did_key.inbox()).await { - Ok(stream) => stream, - Err(e) => { - //TODO: Maybe panic as a means of notifying about this being a fatal error? - log::error!("Error subscribing to topic: {e}"); - return; - } - }; - - futures::pin_mut!(stream); - - while let Some(message) = stream.next().await { - let Some(peer_id) = message.source else { - //Note: Due to configuration, we should ALWAYS have a peer set in its source - // thus we can ignore the request if no peer is provided - continue; - }; - - let Ok(did) = peer_id.to_did() else { - //Note: The peer id is embedded with ed25519 public key, therefore we can decode it into a did key - // otherwise we can ignore - continue; - }; - - if let Err(e) = store.check_request_message(&did, &message.data).await { - error!("Error: {e}"); - } - } - } - }); - tokio::task::yield_now().await; - Ok(store) - } - - pub(crate) fn phonebook(&self) -> Option<&PhoneBook> { - self.phonebook.as_ref() - } - - //TODO: Implement Errors - #[tracing::instrument(skip(self, data))] - async fn check_request_message(&mut self, did: &DID, data: &[u8]) -> anyhow::Result<()> { - let pk_did = &*self.did_key; - - let bytes = ecdh_decrypt(pk_did, Some(did), data)?; - - log::trace!("received payload size: {} bytes", bytes.len()); - - let data = serde_json::from_slice::(&bytes)?; - - log::info!("Received event from {did}"); - - if self - .list_incoming_request() - .await - .unwrap_or_default() - .contains(&data.sender) - && data.event == Event::Request - { - warn!("Request exist locally. Skipping"); - return Ok(()); - } - - //TODO: Send error if dropped early due to error when processing request - let mut signal = self.signal.write().await.remove(&data.sender); - - log::debug!("Event {:?}", data.event); - - // Before we validate the request, we should check to see if the key is blocked - // If it is, skip the request so we dont wait resources storing it. - if self.is_blocked(&data.sender).await? && !matches!(data.event, Event::Block) { - log::warn!("Received event from a blocked identity."); - let payload = RequestResponsePayload { - sender: (*self.did_key).clone(), - event: Event::Block, - }; - - self.broadcast_request((&data.sender, &payload), false, true) - .await?; - - return Ok(()); - } - - match data.event { - Event::Accept => { - let list = self.list_all_raw_request().await?; - - let Some(item) = list - .iter() - .filter(|req| req.r#type() == RequestType::Outgoing) - .find(|req| data.sender.eq(req.did())) - .cloned() - else { - anyhow::bail!( - "Unable to locate pending request. Already been accepted or rejected?" - ) - }; - - // Maybe just try the function instead and have it be a hard error? - if self - .identity - .root_document_remove_request(&item) - .await - .is_err() - { - anyhow::bail!( - "Unable to locate pending request. Already been accepted or rejected?" - ) - } - - self.add_friend(item.did()).await?; - } - Event::Request => { - if self.is_friend(&data.sender).await? { - log::debug!("Friend already exist. 
Remitting event"); - let payload = RequestResponsePayload { - sender: (*self.did_key).clone(), - event: Event::Accept, - }; - - self.broadcast_request((&data.sender, &payload), false, false) - .await?; - - return Ok(()); - } - - let list = self.list_all_raw_request().await?; - - if let Some(inner_req) = list - .iter() - .find(|request| { - request.r#type() == RequestType::Outgoing && data.sender.eq(request.did()) - }) - .cloned() - { - //Because there is also a corresponding outgoing request for the incoming request - //we can automatically add them - self.identity - .root_document_remove_request(&inner_req) - .await?; - self.add_friend(inner_req.did()).await?; - } else { - self.identity - .root_document_add_request(&Request::In(data.sender.clone())) - .await?; - - tokio::spawn({ - let store = self.identity.clone(); - let from = data.sender.clone(); - async move { - let _ = tokio::time::timeout(Duration::from_secs(10), async { - loop { - if let Ok(list) = - store.lookup(LookupBy::DidKey(from.clone())).await - { - if !list.is_empty() { - break; - } - } - tokio::time::sleep(Duration::from_secs(1)).await; - } - }) - .await - .ok(); - - store.emit_event(MultiPassEventKind::FriendRequestReceived { from }); - } - }); - } - let payload = RequestResponsePayload { - sender: (*self.did_key).clone(), - event: Event::Response, - }; - - self.broadcast_request((&data.sender, &payload), false, false) - .await?; - } - Event::Reject => { - let list = self.list_all_raw_request().await?; - let internal_request = list - .iter() - .find(|request| { - request.r#type() == RequestType::Outgoing && data.sender.eq(request.did()) - }) - .cloned() - .ok_or(Error::FriendRequestDoesntExist)?; - - self.identity - .root_document_remove_request(&internal_request) - .await?; - - self.emit_event(MultiPassEventKind::OutgoingFriendRequestRejected { - did: data.sender, - }); - } - Event::Remove => { - if self.is_friend(&data.sender).await? { - self.remove_friend(&data.sender, false).await?; - } - } - Event::Retract => { - let list = self.list_all_raw_request().await?; - let internal_request = list - .iter() - .find(|request| { - request.r#type() == RequestType::Incoming && data.sender.eq(request.did()) - }) - .cloned() - .ok_or(Error::FriendRequestDoesntExist)?; - - self.identity - .root_document_remove_request(&internal_request) - .await?; - - self.emit_event(MultiPassEventKind::IncomingFriendRequestClosed { - did: data.sender, - }); - } - Event::Block => { - if self.has_request_from(&data.sender).await? { - self.emit_event(MultiPassEventKind::IncomingFriendRequestClosed { - did: data.sender.clone(), - }); - } else if self.sent_friend_request_to(&data.sender).await? { - self.emit_event(MultiPassEventKind::OutgoingFriendRequestRejected { - did: data.sender.clone(), - }); - } - - let list = self.list_all_raw_request().await?; - for req in list.iter().filter(|req| req.did().eq(&data.sender)) { - self.identity.root_document_remove_request(req).await?; - } - - if self.is_friend(&data.sender).await? 
{ - self.remove_friend(&data.sender, false).await?; - } - - let completed = self - .identity - .root_document_add_block_by(&data.sender) - .await - .is_ok(); - if completed { - tokio::spawn({ - let store = self.identity.clone(); - let sender = data.sender.clone(); - async move { - let _ = store.push(&sender).await.ok(); - let _ = store.request(&sender, RequestOption::Identity).await.ok(); - } - }); - - if let Err(e) = self - .tx - .send(MultiPassEventKind::BlockedBy { did: data.sender }) - { - error!("Error broadcasting event: {e}"); - } - } - - if let Some(tx) = std::mem::take(&mut signal) { - log::debug!("Signaling broadcast of response..."); - let _ = tx.send(Err(Error::BlockedByUser)); - } - } - Event::Unblock => { - let completed = self - .identity - .root_document_remove_block_by(&data.sender) - .await - .is_ok(); - - if completed { - tokio::spawn({ - let store = self.identity.clone(); - let sender = data.sender.clone(); - async move { - let _ = store.push(&sender).await.ok(); - let _ = store.request(&sender, RequestOption::Identity).await.ok(); - } - }); - self.emit_event(MultiPassEventKind::UnblockedBy { did: data.sender }); - } - } - Event::Response => { - if let Some(tx) = std::mem::take(&mut signal) { - log::debug!("Signaling broadcast of response..."); - let _ = tx.send(Ok(())); - } - } - }; - if let Some(tx) = std::mem::take(&mut signal) { - log::debug!("Signaling broadcast of response..."); - let _ = tx.send(Ok(())); - } - - Ok(()) - } - - async fn local(&self) -> anyhow::Result<(ipfs::libp2p::identity::PublicKey, PeerId)> { - let (local_ipfs_public_key, local_peer_id) = self - .ipfs - .identity(None) - .await - .map(|info| (info.public_key.clone(), info.peer_id))?; - Ok((local_ipfs_public_key, local_peer_id)) - } -} - -impl FriendsStore { - #[tracing::instrument(skip(self))] - pub async fn send_request(&mut self, pubkey: &DID) -> Result<(), Error> { - let (local_ipfs_public_key, _) = self.local().await?; - let local_public_key = libp2p_pub_to_did(&local_ipfs_public_key)?; - - if local_public_key.eq(pubkey) { - return Err(Error::CannotSendSelfFriendRequest); - } - - if self.is_friend(pubkey).await? { - return Err(Error::FriendExist); - } - - if self.is_blocked_by(pubkey).await? { - return Err(Error::BlockedByUser); - } - - if self.is_blocked(pubkey).await? { - return Err(Error::PublicKeyIsBlocked); - } - - if self.has_request_from(pubkey).await? { - return self.accept_request(pubkey).await; - } - - let list = self.list_all_raw_request().await?; - - if list - .iter() - .any(|request| request.r#type() == RequestType::Outgoing && request.did().eq(pubkey)) - { - // since the request has already been sent, we should not be sending it again - return Err(Error::FriendRequestExist); - } - - let payload = RequestResponsePayload { - sender: local_public_key, - event: Event::Request, - }; - - self.broadcast_request((pubkey, &payload), true, true).await - } - - #[tracing::instrument(skip(self))] - pub async fn accept_request(&mut self, pubkey: &DID) -> Result<(), Error> { - let (local_ipfs_public_key, _) = self.local().await?; - - let local_public_key = libp2p_pub_to_did(&local_ipfs_public_key)?; - - if local_public_key.eq(pubkey) { - return Err(Error::CannotAcceptSelfAsFriend); - } - - if !self.has_request_from(pubkey).await? 
{ - return Err(Error::FriendRequestDoesntExist); - } - - let list = self.list_all_raw_request().await?; - - let internal_request = list - .iter() - .find(|request| request.r#type() == RequestType::Incoming && request.did().eq(pubkey)) - .cloned() - .ok_or(Error::CannotFindFriendRequest)?; - - if self.is_friend(pubkey).await? { - warn!("Already friends. Removing request"); - - self.identity - .root_document_remove_request(&internal_request) - .await?; - - return Ok(()); - } - - let payload = RequestResponsePayload { - event: Event::Accept, - sender: local_public_key, - }; - - self.add_friend(pubkey).await?; - - self.identity - .root_document_remove_request(&internal_request) - .await?; - - self.broadcast_request((pubkey, &payload), false, true) - .await - } - - #[tracing::instrument(skip(self))] - pub async fn reject_request(&mut self, pubkey: &DID) -> Result<(), Error> { - let (local_ipfs_public_key, _) = self.local().await?; - - let local_public_key = libp2p_pub_to_did(&local_ipfs_public_key)?; - - if local_public_key.eq(pubkey) { - return Err(Error::CannotDenySelfAsFriend); - } - - if !self.has_request_from(pubkey).await? { - return Err(Error::FriendRequestDoesntExist); - } - - let list = self.list_all_raw_request().await?; - - // Although the request been validated before storing, we should validate again just to be safe - let internal_request = list - .iter() - .find(|request| request.r#type() == RequestType::Incoming && request.did().eq(pubkey)) - .cloned() - .ok_or(Error::CannotFindFriendRequest)?; - - let payload = RequestResponsePayload { - sender: local_public_key, - event: Event::Reject, - }; - - self.identity - .root_document_remove_request(&internal_request) - .await?; - - self.broadcast_request((pubkey, &payload), false, true) - .await - } - - #[tracing::instrument(skip(self))] - pub async fn close_request(&mut self, pubkey: &DID) -> Result<(), Error> { - let (local_ipfs_public_key, _) = self.local().await?; - - let local_public_key = libp2p_pub_to_did(&local_ipfs_public_key)?; - - let list = self.list_all_raw_request().await?; - - let internal_request = list - .iter() - .find(|request| request.r#type() == RequestType::Outgoing && request.did().eq(pubkey)) - .cloned() - .ok_or(Error::CannotFindFriendRequest)?; - - let payload = RequestResponsePayload { - sender: local_public_key, - event: Event::Retract, - }; - - self.identity - .root_document_remove_request(&internal_request) - .await?; - - if let Some(entry) = self.queue.get(pubkey).await { - if entry.event == Event::Request { - self.queue.remove(pubkey).await; - self.emit_event(MultiPassEventKind::OutgoingFriendRequestClosed { - did: pubkey.clone(), - }); - - return Ok(()); - } - } - - self.broadcast_request((pubkey, &payload), false, true) - .await - } - - #[tracing::instrument(skip(self))] - pub async fn has_request_from(&self, pubkey: &DID) -> Result { - self.list_incoming_request() - .await - .map(|list| list.contains(pubkey)) - } -} - -impl FriendsStore { - #[tracing::instrument(skip(self))] - pub async fn block_list(&self) -> Result, Error> { - self.identity.root_document_get_blocks().await - } - - #[tracing::instrument(skip(self))] - pub async fn is_blocked(&self, public_key: &DID) -> Result { - self.block_list() - .await - .map(|list| list.contains(public_key)) - } - - #[tracing::instrument(skip(self))] - pub async fn block(&mut self, pubkey: &DID) -> Result<(), Error> { - let (local_ipfs_public_key, _) = self.local().await?; - - let local_public_key = libp2p_pub_to_did(&local_ipfs_public_key)?; - - if 
local_public_key.eq(pubkey) { - return Err(Error::CannotBlockOwnKey); - } - - if self.is_blocked(pubkey).await? { - return Err(Error::PublicKeyIsBlocked); - } - - self.identity.root_document_add_block(pubkey).await?; - - // Remove anything from queue related to the key - self.queue.remove(pubkey).await; - - let list = self.list_all_raw_request().await?; - for req in list.iter().filter(|req| req.did().eq(pubkey)) { - self.identity.root_document_remove_request(req).await?; - } - - if self.is_friend(pubkey).await? { - if let Err(e) = self.remove_friend(pubkey, false).await { - error!("Error removing item from friend list: {e}"); - } - } - - // Since we want to broadcast the remove request, banning the peer after would not allow that to happen - // Although this may get uncomment in the future to block connections regardless if its sent or not, or - // if we decide to send the request through a relay to broadcast it to the peer, however - // the moment this extension is reloaded the block list are considered as a "banned peer" in libp2p - - // let peer_id = did_to_libp2p_pub(pubkey)?.to_peer_id(); - - // self.ipfs.ban_peer(peer_id).await?; - let payload = RequestResponsePayload { - sender: local_public_key, - event: Event::Block, - }; - - self.broadcast_request((pubkey, &payload), false, true) - .await - } - - #[tracing::instrument(skip(self))] - pub async fn unblock(&mut self, pubkey: &DID) -> Result<(), Error> { - let (local_ipfs_public_key, _) = self.local().await?; - - let local_public_key = libp2p_pub_to_did(&local_ipfs_public_key)?; - - if local_public_key.eq(pubkey) { - return Err(Error::CannotUnblockOwnKey); - } - - if !self.is_blocked(pubkey).await? { - return Err(Error::PublicKeyIsntBlocked); - } - - self.identity.root_document_remove_block(pubkey).await?; - - let peer_id = did_to_libp2p_pub(pubkey)?.to_peer_id(); - self.ipfs.unban_peer(peer_id).await?; - - let payload = RequestResponsePayload { - sender: local_public_key, - event: Event::Unblock, - }; - - self.broadcast_request((pubkey, &payload), false, true) - .await - } -} - -impl FriendsStore { - pub async fn block_by_list(&self) -> Result, Error> { - self.identity.root_document_get_block_by().await - } - - pub async fn is_blocked_by(&self, pubkey: &DID) -> Result { - self.block_by_list().await.map(|list| list.contains(pubkey)) - } -} - -impl FriendsStore { - pub async fn friends_list(&self) -> Result, Error> { - self.identity.root_document_get_friends().await - } - - // Should not be called directly but only after a request is accepted - #[tracing::instrument(skip(self))] - pub async fn add_friend(&mut self, pubkey: &DID) -> Result<(), Error> { - if self.is_friend(pubkey).await? { - return Err(Error::FriendExist); - } - - if self.is_blocked(pubkey).await? { - return Err(Error::PublicKeyIsBlocked); - } - - self.identity.root_document_add_friend(pubkey).await?; - - if let Some(phonebook) = self.phonebook.as_ref() { - if let Err(_e) = phonebook.add_friend(pubkey).await { - error!("Error: {_e}"); - } - } - - // Push to give an update in the event any wasnt transmitted during the initial push - // We dont care if this errors or not. - let _ = self.identity.push(pubkey).await.ok(); - - self.emit_event(MultiPassEventKind::FriendAdded { - did: pubkey.clone(), - }); - - Ok(()) - } - - #[tracing::instrument(skip(self, broadcast))] - pub async fn remove_friend(&mut self, pubkey: &DID, broadcast: bool) -> Result<(), Error> { - if !self.is_friend(pubkey).await? 
{ - return Err(Error::FriendDoesntExist); - } - - self.identity.root_document_remove_friend(pubkey).await?; - - if let Some(phonebook) = self.phonebook.as_ref() { - if let Err(_e) = phonebook.remove_friend(pubkey).await { - error!("Error: {_e}"); - } - } - - if broadcast { - let (local_ipfs_public_key, _) = self.local().await?; - let local_public_key = libp2p_pub_to_did(&local_ipfs_public_key)?; - - let payload = RequestResponsePayload { - sender: local_public_key, - event: Event::Remove, - }; - - self.broadcast_request((pubkey, &payload), false, true) - .await?; - } - - self.emit_event(MultiPassEventKind::FriendRemoved { - did: pubkey.clone(), - }); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub async fn is_friend(&self, pubkey: &DID) -> Result { - self.friends_list().await.map(|list| list.contains(pubkey)) - } -} - -impl FriendsStore { - pub async fn list_all_raw_request(&self) -> Result, Error> { - self.identity.root_document_get_requests().await - } - - pub async fn received_friend_request_from(&self, did: &DID) -> Result { - self.list_incoming_request() - .await - .map(|list| list.iter().any(|request| request.eq(did))) - } - - #[tracing::instrument(skip(self))] - pub async fn list_incoming_request(&self) -> Result, Error> { - self.list_all_raw_request().await.map(|list| { - list.iter() - .filter_map(|request| match request { - Request::In(request) => Some(request), - _ => None, - }) - .cloned() - .collect::>() - }) - } - - #[tracing::instrument(skip(self))] - pub async fn sent_friend_request_to(&self, did: &DID) -> Result { - self.list_outgoing_request() - .await - .map(|list| list.iter().any(|request| request.eq(did))) - } - - #[tracing::instrument(skip(self))] - pub async fn list_outgoing_request(&self) -> Result, Error> { - self.list_all_raw_request().await.map(|list| { - list.iter() - .filter_map(|request| match request { - Request::Out(request) => Some(request), - _ => None, - }) - .cloned() - .collect::>() - }) - } - - #[tracing::instrument(skip(self))] - pub async fn broadcast_request( - &mut self, - (recipient, payload): (&DID, &RequestResponsePayload), - store_request: bool, - queue_broadcast: bool, - ) -> Result<(), Error> { - let remote_peer_id = did_to_libp2p_pub(recipient)?.to_peer_id(); - - if !self.discovery.contains(recipient).await { - self.discovery.insert(recipient).await?; - } - - if store_request { - let outgoing_request = Request::Out(recipient.clone()); - let list = self.list_all_raw_request().await?; - if !list.contains(&outgoing_request) { - self.identity - .root_document_add_request(&outgoing_request) - .await?; - } - } - - let kp = &*self.did_key; - - let payload_bytes = serde_json::to_vec(&payload)?; - - let bytes = ecdh_encrypt(kp, Some(recipient), payload_bytes)?; - - log::trace!("Request Payload size: {} bytes", bytes.len()); - - log::info!("Sending event to {recipient}"); - - let peers = self.ipfs.pubsub_peers(Some(recipient.inbox())).await?; - - let mut queued = false; - - let wait = self.wait_on_response.is_some(); - - let mut rx = (matches!(payload.event, Event::Request) && wait).then_some({ - let (tx, rx) = oneshot::channel(); - self.signal.write().await.insert(recipient.clone(), tx); - rx - }); - - let start = Instant::now(); - if !peers.contains(&remote_peer_id) - || (peers.contains(&remote_peer_id) - && self - .ipfs - .pubsub_publish(recipient.inbox(), bytes) - .await - .is_err()) - && queue_broadcast - { - self.queue.insert(recipient, payload.clone()).await; - queued = true; - self.signal.write().await.remove(recipient); - } - - if 
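// If no peer is subscribed to the recipient's inbox topic, or the publish
// itself fails, the payload is parked in the Queue (loaded from disk at
// startup, see Queue::load above) so it can be delivered later, and the
// pending response signal is dropped, since no reply can arrive for a queued
// request.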
!queued { - let end = start.elapsed(); - log::trace!("Took {}ms to send event", end.as_millis()); - } - - if !queued && matches!(payload.event, Event::Request) { - if let Some(rx) = std::mem::take(&mut rx) { - if let Some(timeout) = self.wait_on_response { - let start = Instant::now(); - if let Ok(Ok(res)) = tokio::time::timeout(timeout, rx).await { - let end = start.elapsed(); - log::trace!("Took {}ms to receive a response", end.as_millis()); - res? - } - } - } - } - - match payload.event { - Event::Request => { - self.emit_event(MultiPassEventKind::FriendRequestSent { - to: recipient.clone(), - }); - } - Event::Retract => { - self.emit_event(MultiPassEventKind::OutgoingFriendRequestClosed { - did: recipient.clone(), - }); - } - Event::Reject => { - self.emit_event(MultiPassEventKind::IncomingFriendRequestRejected { - did: recipient.clone(), - }); - } - Event::Block => { - tokio::spawn({ - let store = self.identity.clone(); - let recipient = recipient.clone(); - async move { - let _ = store.push(&recipient).await.ok(); - let _ = store - .request(&recipient, RequestOption::Identity) - .await - .ok(); - } - }); - self.emit_event(MultiPassEventKind::Blocked { - did: recipient.clone(), - }); - } - Event::Unblock => { - tokio::spawn({ - let store = self.identity.clone(); - let recipient = recipient.clone(); - async move { - let _ = store.push(&recipient).await.ok(); - let _ = store - .request(&recipient, RequestOption::Identity) - .await - .ok(); - } - }); - - self.emit_event(MultiPassEventKind::Unblocked { - did: recipient.clone(), - }); - } - _ => {} - }; - Ok(()) - } -} - -impl FriendsStore { - pub fn emit_event(&self, event: MultiPassEventKind) { - if let Err(e) = self.tx.send(event) { - error!("Error broadcasting event: {e}"); - } - } -} diff --git a/extensions/warp-ipfs/src/store/identity.rs b/extensions/warp-ipfs/src/store/identity.rs index ea64e282a..4164d351b 100644 --- a/extensions/warp-ipfs/src/store/identity.rs +++ b/extensions/warp-ipfs/src/store/identity.rs @@ -2,26 +2,23 @@ //onto the lock. 
#![allow(clippy::clone_on_copy)] use crate::{ - config::{DefaultPfpFn, Discovery as DiscoveryConfig, UpdateEvents}, - store::{did_to_libp2p_pub, discovery::Discovery, PeerIdExt, PeerTopic, VecExt}, + behaviour::phonebook::PhoneBookCommand, + config::{self, Discovery as DiscoveryConfig, UpdateEvents}, + store::{did_to_libp2p_pub, discovery::Discovery, DidExt, PeerIdExt, PeerTopic}, }; -use futures::{ - channel::{mpsc, oneshot}, - stream::BoxStream, - StreamExt, -}; -use ipfs::{Ipfs, IpfsPath, Keypair, Multiaddr}; +use futures::{channel::oneshot, stream::BoxStream, StreamExt, TryStreamExt}; +use ipfs::{Ipfs, IpfsPath, Keypair}; use libipld::Cid; use rust_ipfs as ipfs; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use std::{ - collections::HashSet, + collections::{HashMap, HashSet}, path::PathBuf, - sync::atomic::{AtomicBool, Ordering}, + task::Poll, time::{Duration, Instant}, }; -use tokio::sync::broadcast; +use tokio::sync::{broadcast, RwLock}; use tracing::{ log::{self, error}, warn, @@ -40,59 +37,116 @@ use warp::{ }; use super::{ - connected_to_peer, + connected_to_peer, did_keypair, document::{ + cache::IdentityCache, identity::{unixfs_fetch, IdentityDocument}, + root::RootDocumentMap, utils::GetLocalDag, ExtractedRootDocument, RootDocument, ToCid, }, - ecdh_decrypt, ecdh_encrypt, - friends::{FriendsStore, Request}, - libp2p_pub_to_did, + ecdh_decrypt, ecdh_encrypt, libp2p_pub_to_did, + phonebook::PhoneBook, + queue::Queue, }; #[derive(Clone)] +#[allow(clippy::type_complexity)] pub struct IdentityStore { ipfs: Ipfs, - path: Option, - - root_cid: Arc>>, + root_document: RootDocumentMap, - cache_cid: Arc>>, - - identity: Arc>>, + identity_cache: IdentityCache, online_status: Arc>>, - discovery: Discovery, + // keypair + did_key: Arc, - relay: Vec, + // Queue to handle sending friend request + queue: Queue, - fetch_over_bitswap: Arc, + phonebook: PhoneBook, - share_platform: Arc, + wait_on_response: Option, - start_event: Arc, + signal: Arc>>>>, - end_event: Arc, + discovery: Discovery, + + config: config::Config, tesseract: Tesseract, - root_task: Arc>>>, + event: broadcast::Sender, +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)] +pub enum Request { + In(DID), + Out(DID), +} - pub(crate) task_send: - Arc>>>, +impl From for RequestType { + fn from(request: Request) -> Self { + RequestType::from(&request) + } +} - event: broadcast::Sender, +impl From<&Request> for RequestType { + fn from(request: &Request) -> Self { + match request { + Request::In(_) => RequestType::Incoming, + Request::Out(_) => RequestType::Outgoing, + } + } +} - update_event: UpdateEvents, +impl Request { + pub fn r#type(&self) -> RequestType { + self.into() + } + + pub fn did(&self) -> &DID { + match self { + Request::In(did) => did, + Request::Out(did) => did, + } + } +} - disable_image: bool, +#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq, Hash, Eq)] +#[serde(rename_all = "lowercase", tag = "type")] +pub enum Event { + /// Event indicating a friend request + Request, + /// Event accepting the request + Accept, + /// Remove identity as a friend + Remove, + /// Reject friend request, if any + Reject, + /// Retract a sent friend request + Retract, + /// Block user + Block, + /// Unblock user + Unblock, + /// Indiciation of a response to a request + Response, +} - friend_store: Arc>>, +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Hash, Eq)] +pub struct RequestResponsePayload { + pub sender: DID, + pub event: Event, +} - default_pfp_callback: Option, 
+#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq, Eq)] +pub enum RequestType { + Incoming, + Outgoing, } #[allow(clippy::large_enum_variant)] @@ -175,71 +229,58 @@ impl IdentityStore { ipfs: Ipfs, path: Option, tesseract: Tesseract, - interval: Option, tx: broadcast::Sender, - default_pfp_callback: Option, - (discovery, relay, fetch_over_bitswap, share_platform, update_event, disable_image): ( - Discovery, - Vec, - bool, - bool, - UpdateEvents, - bool, - ), + pb_tx: futures::channel::mpsc::Sender, + config: &config::Config, + discovery: Discovery, ) -> Result { if let Some(path) = path.as_ref() { if !path.exists() { tokio::fs::create_dir_all(path).await?; } } - let identity = Arc::new(Default::default()); - let start_event = Arc::new(Default::default()); - let end_event = Arc::new(Default::default()); - let root_cid = Arc::new(Default::default()); - let cache_cid = Arc::new(Default::default()); - let fetch_over_bitswap = Arc::new(AtomicBool::new(fetch_over_bitswap)); + let config = config.clone(); + + let identity_cache = IdentityCache::new(&ipfs, path.clone()).await; + let online_status = Arc::default(); - let share_platform = Arc::new(AtomicBool::new(share_platform)); - let root_task = Arc::default(); - let task_send = Arc::default(); - let event = tx; - let friend_store = Arc::default(); + + let event = tx.clone(); + + let did_key = Arc::new(did_keypair(&tesseract)?); + + let root_document = RootDocumentMap::new(&ipfs, did_key.clone(), path.clone()).await; + + let queue = Queue::new( + ipfs.clone(), + did_key.clone(), + config.path.clone(), + discovery.clone(), + ); + + let phonebook = PhoneBook::new(discovery.clone(), pb_tx); + + let signal = Default::default(); + let wait_on_response = config.store_setting.friend_request_response_duration; let store = Self { ipfs, - path, - root_cid, - cache_cid, - identity, + root_document, + identity_cache, online_status, - start_event, - share_platform, - end_event, discovery, - relay, + config, tesseract, - fetch_over_bitswap, - root_task, - task_send, event, - friend_store, - update_event, - disable_image, - default_pfp_callback, + did_key, + queue, + phonebook, + signal, + wait_on_response, }; - if store.path.is_some() { - if let Err(_e) = store.load_cid().await { - //We can ignore if it doesnt exist - } - } - - store.start_root_task().await; - if let Ok(ident) = store.own_identity().await { log::info!("Identity loaded with {}", ident.did_key()); - *store.identity.write().await = Some(ident); - store.start_event.store(true, Ordering::SeqCst); } let did = store.get_keypair_did()?; @@ -249,19 +290,51 @@ impl IdentityStore { .ipfs .pubsub_subscribe("/identity/announce".into()) .await?; + + store.discovery.start().await?; + + let mut discovery_rx = store.discovery.events(); + + log::info!("Loading queue"); + if let Err(_e) = store.queue.load().await {} + + let phonebook = &store.phonebook; + log::info!("Loading friends list into phonebook"); + if let Ok(friends) = store.friends_list().await { + if let Err(_e) = phonebook.add_friend_list(friends).await { + error!("Error adding friends in phonebook: {_e}"); + } + } + + // scan through friends list to see if there is any incoming request or outgoing request matching + // and clear them out of the request list as a precautionary measure + let friends = store.friends_list().await.unwrap_or_default(); + + for friend in friends { + let list = store.list_all_raw_request().await.unwrap_or_default(); + + // cleanup outgoing + for req in list.iter().filter(|req| req.did().eq(&friend)) { + let _ 
= store.root_document.remove_request(req).await; + } + } + + let friend_stream = store.ipfs.pubsub_subscribe(store.did_key.inbox()).await?; + tokio::spawn({ let mut store = store.clone(); async move { let _main_stream = main_stream; - if let Err(e) = store.discovery.start().await { - warn!("Error starting discovery service: {e}. Will not be able to discover peers over namespace"); - } futures::pin_mut!(event_stream); + futures::pin_mut!(friend_stream); - let auto_push = interval.is_some(); + let auto_push = store.config.store_setting.auto_push.is_some(); - let interval = interval + let interval = store + .config + .store_setting + .auto_push .map(|i| { if i.as_millis() < 300000 { Duration::from_millis(300000) @@ -272,49 +345,53 @@ impl IdentityStore { .unwrap_or(Duration::from_millis(300000)); let mut tick = tokio::time::interval(interval); - let mut rx = store.discovery.events(); - loop { - if store.end_event.load(Ordering::SeqCst) { - break; - } - if !store.start_event.load(Ordering::SeqCst) { - tokio::time::sleep(Duration::from_millis(10)).await; - continue; - } + loop { tokio::select! { - message = event_stream.next() => { - if let Some(message) = message { - let entry = match message.source { - Some(peer_id) => match store.discovery.get(peer_id).await.ok() { - Some(entry) => entry.peer_id().to_did().ok(), - None => { - let _ = store.discovery.insert(peer_id).await.ok(); - peer_id.to_did().ok() - }, + biased; + Some(message) = event_stream.next() => { + let entry = match message.source { + Some(peer_id) => match store.discovery.get(peer_id).await.ok() { + Some(entry) => entry.peer_id().to_did().ok(), + None => { + let _ = store.discovery.insert(peer_id).await.ok(); + peer_id.to_did().ok() }, - None => continue, - }; - if let Some(in_did) = entry { - if let Err(e) = store.process_message(in_did, &message.data).await { - error!("Error: {e}"); - } + }, + None => continue, + }; + if let Some(in_did) = entry { + if let Err(e) = store.process_message(in_did, &message.data).await { + error!("Error: {e}"); } } + + } + Some(event) = friend_stream.next() => { + let Some(peer_id) = event.source else { + //Note: Due to configuration, we should ALWAYS have a peer set in its source + // thus we can ignore the request if no peer is provided + continue; + }; + + let Ok(did) = peer_id.to_did() else { + //Note: The peer id is embedded with ed25519 public key, therefore we can decode it into a did key + // otherwise we can ignore + continue; + }; + + if let Err(e) = store.check_request_message(&did, &event.data).await { + error!("Error: {e}"); + } } // Used as the initial request/push - Ok(push) = rx.recv() => { - tokio::spawn({ - let store = store.clone(); - async move { - if let Err(e) = store.request(&push, RequestOption::Identity).await { - error!("Error requesting identity: {e}"); - } - if let Err(e) = store.push(&push).await { - error!("Error pushing identity: {e}"); - } - } - }); + Ok(push) = discovery_rx.recv() => { + if let Err(e) = store.request(&push, RequestOption::Identity).await { + error!("Error requesting identity: {e}"); + } + if let Err(e) = store.push(&push).await { + error!("Error pushing identity: {e}"); + } } _ = tick.tick() => { if auto_push { @@ -330,421 +407,249 @@ impl IdentityStore { Ok(store) } - pub async fn set_friend_store(&self, store: FriendsStore) { - *self.friend_store.write().await = Some(store) + pub(crate) fn phonebook(&self) -> &PhoneBook { + &self.phonebook } - async fn friend_store(&self) -> Result { - self.friend_store.read().await.clone().ok_or(Error::Other) - } 
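// check_request_message below is the receiving half of the friend-request
// protocol carried over from friends.rs. The sending half (broadcast_request)
// boils down to: serialize the payload, ECDH-encrypt it for the recipient, and
// publish to the recipient's inbox topic. A rough sketch, assuming the crate's
// ecdh_encrypt and PeerTopic::inbox helpers; the real method also queues the
// payload for offline peers and can wait on a response signal:

async fn send_event(
    ipfs: &Ipfs,
    own_did: &DID,
    recipient: &DID,
    event: Event,
) -> Result<(), Error> {
    let payload = RequestResponsePayload {
        sender: own_did.clone(),
        event,
    };

    let bytes = serde_json::to_vec(&payload)?;
    // Encrypt for the recipient so only they can read the request payload.
    let bytes = ecdh_encrypt(own_did, Some(recipient), bytes)?;

    ipfs.pubsub_publish(recipient.inbox(), bytes)
        .await
        .map_err(anyhow::Error::from)?;

    Ok(())
}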
+ //TODO: Implement Errors + #[tracing::instrument(skip(self, data))] + async fn check_request_message(&mut self, did: &DID, data: &[u8]) -> anyhow::Result<()> { + let pk_did = &*self.did_key; - async fn start_root_task(&self) { - let root_cid = self.root_cid.clone(); - let ipfs = self.ipfs.clone(); - let (tx, mut rx) = mpsc::unbounded(); - let store = self.clone(); - let task = tokio::spawn(async move { - let root_cid = root_cid.clone(); - - async fn get_root_document( - ipfs: &Ipfs, - root: Arc>>, - ) -> Result { - let root_cid = root - .read() - .await - .clone() - .ok_or(Error::IdentityDoesntExist)?; - let path = IpfsPath::from(root_cid); - let document: RootDocument = path.get_local_dag(ipfs).await?; - document.verify(ipfs).await.map(|_| document) - } + let bytes = ecdh_decrypt(pk_did, Some(did), data)?; - async fn set_root_document( - ipfs: &Ipfs, - store: &IdentityStore, - root: Arc>>, - mut document: RootDocument, - ) -> Result<(), Error> { - let old_cid = root.read().await.clone(); - - let did_kp = store.get_keypair_did()?; - document.sign(&did_kp)?; - document.verify(ipfs).await?; - - let root_cid = document.to_cid(ipfs).await?; - if !ipfs.is_pinned(&root_cid).await? { - ipfs.insert_pin(&root_cid, true).await?; - } - if let Some(old_cid) = old_cid { - if old_cid != root_cid { - if ipfs.is_pinned(&old_cid).await? { - ipfs.remove_pin(&old_cid, true).await?; - } - ipfs.remove_block(old_cid).await?; - } - } - store.save_cid(root_cid).await - } + log::trace!("received payload size: {} bytes", bytes.len()); - while let Some(event) = rx.next().await { - match event { - RootDocumentEvents::Get(res) => { - let _ = res.send(get_root_document(&ipfs, root_cid.clone()).await); - } - RootDocumentEvents::Set(document, res) => { - let _ = res.send( - set_root_document(&ipfs, &store, root_cid.clone(), document).await, - ); - } - RootDocumentEvents::AddRequest(request, res) => { - let ipfs = ipfs.clone(); - let store = store.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let mut document = - get_root_document(&ipfs, root_cid.clone()).await?; - let old_document = document.request; - let mut list: Vec = match document.request { - Some(cid) => cid.get_local_dag(&ipfs).await.unwrap_or_default(), - None => vec![], - }; - - if !list.insert_item(&request) { - return Err::<_, Error>(Error::FriendRequestExist); - } + let data = serde_json::from_slice::(&bytes)?; - document.request = - (!list.is_empty()).then_some(list.to_cid(&ipfs).await?); + log::info!("Received event from {did}"); - set_root_document(&ipfs, &store, root_cid, document).await?; + if self + .list_incoming_request() + .await + .unwrap_or_default() + .contains(&data.sender) + && data.event == Event::Request + { + warn!("Request exist locally. Skipping"); + return Ok(()); + } - if let Some(cid) = old_document { - if !ipfs.is_pinned(&cid).await? 
{ - ipfs.remove_block(cid).await?; - } - } - Ok::<_, Error>(()) - } - .await, - ); - } - RootDocumentEvents::RemoveRequest(request, res) => { - let ipfs = ipfs.clone(); - let store = store.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let mut document = - get_root_document(&ipfs, root_cid.clone()).await?; - let old_document = document.request; - let mut list: Vec = match document.request { - Some(cid) => cid.get_local_dag(&ipfs).await.unwrap_or_default(), - None => vec![], - }; - - if !list.remove_item(&request) { - return Err::<_, Error>(Error::FriendRequestExist); - } + //TODO: Send error if dropped early due to error when processing request + let mut signal = self.signal.write().await.remove(&data.sender); - document.request = - (!list.is_empty()).then_some(list.to_cid(&ipfs).await?); + log::debug!("Event {:?}", data.event); - set_root_document(&ipfs, &store, root_cid, document).await?; + // Before we validate the request, we should check to see if the key is blocked + // If it is, skip the request so we dont wait resources storing it. + if self.is_blocked(&data.sender).await? && !matches!(data.event, Event::Block) { + log::warn!("Received event from a blocked identity."); + let payload = RequestResponsePayload { + sender: (*self.did_key).clone(), + event: Event::Block, + }; - if let Some(cid) = old_document { - if !ipfs.is_pinned(&cid).await? { - ipfs.remove_block(cid).await?; - } - } - Ok::<_, Error>(()) - } - .await, - ); - } - RootDocumentEvents::AddFriend(did, res) => { - let ipfs = ipfs.clone(); - let store = store.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let mut document = - get_root_document(&ipfs, root_cid.clone()).await?; - let old_document = document.friends; - let mut list: Vec = match document.friends { - Some(cid) => cid.get_local_dag(&ipfs).await.unwrap_or_default(), - None => vec![], - }; - - if !list.insert_item(&did) { - return Err::<_, Error>(Error::FriendExist); - } + self.broadcast_request((&data.sender, &payload), false, true) + .await?; - document.friends = - (!list.is_empty()).then_some(list.to_cid(&ipfs).await?); + return Ok(()); + } - set_root_document(&ipfs, &store, root_cid, document).await?; + match data.event { + Event::Accept => { + let list = self.list_all_raw_request().await?; - if let Some(cid) = old_document { - if !ipfs.is_pinned(&cid).await? { - ipfs.remove_block(cid).await?; - } - } - Ok::<_, Error>(()) - } - .await, - ); - } - RootDocumentEvents::RemoveFriend(did, res) => { - let ipfs = ipfs.clone(); - let store = store.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let mut document = - get_root_document(&ipfs, root_cid.clone()).await?; - let old_document = document.friends; - let mut list: Vec = match document.friends { - Some(cid) => cid.get_local_dag(&ipfs).await.unwrap_or_default(), - None => vec![], - }; - - if !list.remove_item(&did) { - return Err::<_, Error>(Error::FriendDoesntExist); - } + let Some(item) = list + .iter() + .filter(|req| req.r#type() == RequestType::Outgoing) + .find(|req| data.sender.eq(req.did())) + .cloned() + else { + anyhow::bail!( + "Unable to locate pending request. Already been accepted or rejected?" + ) + }; + + // Maybe just try the function instead and have it be a hard error? + if self.root_document.remove_request(&item).await.is_err() { + anyhow::bail!( + "Unable to locate pending request. Already been accepted or rejected?" 
+ ) + } - document.friends = - (!list.is_empty()).then_some(list.to_cid(&ipfs).await?); + self.add_friend(item.did()).await?; + } + Event::Request => { + if self.is_friend(&data.sender).await? { + log::debug!("Friend already exist. Remitting event"); + let payload = RequestResponsePayload { + sender: (*self.did_key).clone(), + event: Event::Accept, + }; + + self.broadcast_request((&data.sender, &payload), false, false) + .await?; + + return Ok(()); + } - set_root_document(&ipfs, &store, root_cid, document).await?; + let list = self.list_all_raw_request().await?; - if let Some(cid) = old_document { - if !ipfs.is_pinned(&cid).await? { - ipfs.remove_block(cid).await?; + if let Some(inner_req) = list + .iter() + .find(|request| { + request.r#type() == RequestType::Outgoing && data.sender.eq(request.did()) + }) + .cloned() + { + //Because there is also a corresponding outgoing request for the incoming request + //we can automatically add them + self.root_document.remove_request(&inner_req).await?; + self.add_friend(inner_req.did()).await?; + } else { + self.root_document + .add_request(&Request::In(data.sender.clone())) + .await?; + + tokio::spawn({ + let store = self.clone(); + let from = data.sender.clone(); + async move { + let _ = tokio::time::timeout(Duration::from_secs(10), async { + loop { + if let Ok(list) = + store.lookup(LookupBy::DidKey(from.clone())).await + { + if !list.is_empty() { + break; + } } + tokio::time::sleep(Duration::from_secs(1)).await; } - Ok::<_, Error>(()) - } - .await, - ); - } - RootDocumentEvents::AddBlock(did, res) => { - let ipfs = ipfs.clone(); - let store = store.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let mut document = - get_root_document(&ipfs, root_cid.clone()).await?; - let old_document = document.blocks; - let mut list: Vec = match document.blocks { - Some(cid) => cid.get_local_dag(&ipfs).await.unwrap_or_default(), - None => vec![], - }; - - if !list.insert_item(&did) { - return Err::<_, Error>(Error::PublicKeyIsBlocked); - } + }) + .await + .ok(); - document.blocks = - (!list.is_empty()).then_some(list.to_cid(&ipfs).await?); + store.emit_event(MultiPassEventKind::FriendRequestReceived { from }); + } + }); + } + let payload = RequestResponsePayload { + sender: (*self.did_key).clone(), + event: Event::Response, + }; - set_root_document(&ipfs, &store, root_cid, document).await?; + self.broadcast_request((&data.sender, &payload), false, false) + .await?; + } + Event::Reject => { + let list = self.list_all_raw_request().await?; + let internal_request = list + .iter() + .find(|request| { + request.r#type() == RequestType::Outgoing && data.sender.eq(request.did()) + }) + .cloned() + .ok_or(Error::FriendRequestDoesntExist)?; - if let Some(cid) = old_document { - if !ipfs.is_pinned(&cid).await? 
{ - ipfs.remove_block(cid).await?; - } - } - Ok::<_, Error>(()) - } - .await, - ); - } - RootDocumentEvents::RemoveBlock(did, res) => { - let ipfs = ipfs.clone(); - let store = store.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let mut document = - get_root_document(&ipfs, root_cid.clone()).await?; - - let old_document = document.blocks; - let mut list: Vec = match document.blocks { - Some(cid) => cid.get_local_dag(&ipfs).await.unwrap_or_default(), - None => vec![], - }; - - if !list.remove_item(&did) { - return Err::<_, Error>(Error::PublicKeyIsntBlocked); - } + self.root_document.remove_request(&internal_request).await?; - document.blocks = - (!list.is_empty()).then_some(list.to_cid(&ipfs).await?); + self.emit_event(MultiPassEventKind::OutgoingFriendRequestRejected { + did: data.sender, + }); + } + Event::Remove => { + if self.is_friend(&data.sender).await? { + self.remove_friend(&data.sender, false).await?; + } + } + Event::Retract => { + let list = self.list_all_raw_request().await?; + let internal_request = list + .iter() + .find(|request| { + request.r#type() == RequestType::Incoming && data.sender.eq(request.did()) + }) + .cloned() + .ok_or(Error::FriendRequestDoesntExist)?; - set_root_document(&ipfs, &store, root_cid, document).await?; + self.root_document.remove_request(&internal_request).await?; - if let Some(cid) = old_document { - if !ipfs.is_pinned(&cid).await? { - ipfs.remove_block(cid).await?; - } - } - Ok::<_, Error>(()) - } - .await, - ); - } - RootDocumentEvents::AddBlockBy(did, res) => { - let ipfs = ipfs.clone(); - let store = store.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let mut document = - get_root_document(&ipfs, root_cid.clone()).await?; - let old_document = document.block_by; - let mut list: Vec = match document.block_by { - Some(cid) => cid.get_local_dag(&ipfs).await.unwrap_or_default(), - None => vec![], - }; - - if !list.insert_item(&did) { - return Err::<_, Error>(Error::PublicKeyIsntBlocked); - } + self.emit_event(MultiPassEventKind::IncomingFriendRequestClosed { + did: data.sender, + }); + } + Event::Block => { + if self.has_request_from(&data.sender).await? { + self.emit_event(MultiPassEventKind::IncomingFriendRequestClosed { + did: data.sender.clone(), + }); + } else if self.sent_friend_request_to(&data.sender).await? { + self.emit_event(MultiPassEventKind::OutgoingFriendRequestRejected { + did: data.sender.clone(), + }); + } - document.block_by = - (!list.is_empty()).then_some(list.to_cid(&ipfs).await?); + let list = self.list_all_raw_request().await?; + for req in list.iter().filter(|req| req.did().eq(&data.sender)) { + self.root_document.remove_request(req).await?; + } - set_root_document(&ipfs, &store, root_cid, document).await?; + if self.is_friend(&data.sender).await? { + self.remove_friend(&data.sender, false).await?; + } - if let Some(cid) = old_document { - if !ipfs.is_pinned(&cid).await? 
{ - ipfs.remove_block(cid).await?; - } - } - Ok::<_, Error>(()) - } - .await, - ); - } - RootDocumentEvents::RemoveBlockBy(did, res) => { - let ipfs = ipfs.clone(); - let store = store.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let mut document = - get_root_document(&ipfs, root_cid.clone()).await?; - let old_document = document.block_by; - let mut list: Vec = match document.block_by { - Some(cid) => cid.get_local_dag(&ipfs).await.unwrap_or_default(), - None => vec![], - }; - - if !list.remove_item(&did) { - return Err::<_, Error>(Error::PublicKeyIsntBlocked); - } + let completed = self.root_document.add_block_by(&data.sender).await.is_ok(); + if completed { + let sender = data.sender.clone(); - document.block_by = - (!list.is_empty()).then_some(list.to_cid(&ipfs).await?); + let _ = self.push(&sender).await.ok(); + let _ = self.request(&sender, RequestOption::Identity).await.ok(); - set_root_document(&ipfs, &store, root_cid, document).await?; + self.emit_event(MultiPassEventKind::BlockedBy { did: data.sender }); + } - if let Some(cid) = old_document { - if !ipfs.is_pinned(&cid).await? { - ipfs.remove_block(cid).await?; - } - } - Ok::<_, Error>(()) - } - .await, - ); - } - RootDocumentEvents::GetRequestList(res) => { - let ipfs = ipfs.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let document = get_root_document(&ipfs, root_cid.clone()).await?; - - let list: Vec = match document.request { - Some(cid) => cid.get_local_dag(&ipfs).await?, - None => vec![], - }; - - Ok::<_, Error>(list) - } - .await, - ); - } - RootDocumentEvents::GetFriendList(res) => { - let ipfs = ipfs.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let document = get_root_document(&ipfs, root_cid.clone()).await?; - - let list: Vec = match document.friends { - Some(cid) => cid.get_local_dag(&ipfs).await?, - None => vec![], - }; - - Ok::<_, Error>(list) - } - .await, - ); - } - RootDocumentEvents::GetBlockList(res) => { - let ipfs = ipfs.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let document = get_root_document(&ipfs, root_cid.clone()).await?; - - let list: Vec = match document.blocks { - Some(cid) => cid.get_local_dag(&ipfs).await?, - None => vec![], - }; - - Ok::<_, Error>(list) - } - .await, - ); - } - RootDocumentEvents::GetBlockByList(res) => { - let ipfs = ipfs.clone(); - let root_cid = root_cid.clone(); - let _ = res.send( - async move { - let document = get_root_document(&ipfs, root_cid.clone()).await?; - - let list: Vec = match document.block_by { - Some(cid) => cid.get_local_dag(&ipfs).await?, - None => vec![], - }; - - Ok::<_, Error>(list) - } - .await, - ); - } + if let Some(tx) = std::mem::take(&mut signal) { + log::debug!("Signaling broadcast of response..."); + let _ = tx.send(Err(Error::BlockedByUser)); } } - }); + Event::Unblock => { + let completed = self + .root_document + .remove_block_by(&data.sender) + .await + .is_ok(); + + if completed { + let sender = data.sender.clone(); + + let _ = self.push(&sender).await.ok(); + let _ = self.request(&sender, RequestOption::Identity).await.ok(); + + self.emit_event(MultiPassEventKind::UnblockedBy { did: data.sender }); + } + } + Event::Response => { + if let Some(tx) = std::mem::take(&mut signal) { + log::debug!("Signaling broadcast of response..."); + let _ = tx.send(Ok(())); + } + } + }; + if let Some(tx) = std::mem::take(&mut signal) { + log::debug!("Signaling broadcast of response..."); + let _ = tx.send(Ok(())); + } - 
*self.task_send.write().await = Some(tx); - *self.root_task.write().await = Some(task); + Ok(()) } async fn push_iter>(&self, list: I) { for did in list { - tokio::spawn({ - let did = did.clone(); - let store = self.clone(); - async move { if let Err(_e) = store.push(&did).await {} } - }); + if let Err(e) = self.push(&did).await { + log::error!("Error pushing identity to {did}: {e}"); + } } } @@ -797,22 +702,13 @@ impl IdentityStore { let mut identity = self.own_identity_document().await?; - let is_friend = match self.friend_store().await { - Ok(store) => store.is_friend(out_did).await.unwrap_or_default(), - _ => false, - }; + let is_friend = self.is_friend(out_did).await.unwrap_or_default(); - let is_blocked = match self.friend_store().await { - Ok(store) => store.is_blocked(out_did).await.unwrap_or_default(), - _ => false, - }; + let is_blocked = self.is_blocked(out_did).await.unwrap_or_default(); - let is_blocked_by = match self.friend_store().await { - Ok(store) => store.is_blocked_by(out_did).await.unwrap_or_default(), - _ => false, - }; + let is_blocked_by = self.is_blocked_by(out_did).await.unwrap_or_default(); - let share_platform = self.share_platform.load(Ordering::SeqCst); + let share_platform = self.config.store_setting.share_platform; let platform = (share_platform && (!is_blocked || !is_blocked_by)).then_some(self.own_platform()); @@ -826,11 +722,13 @@ impl IdentityStore { let profile_picture = identity.profile_picture; let profile_banner = identity.profile_banner; - let include_pictures = (matches!(self.update_event, UpdateEvents::Enabled) - || matches!( - self.update_event, - UpdateEvents::FriendsOnly | UpdateEvents::EmitFriendsOnly - ) && is_friend) + let include_pictures = (matches!( + self.config.store_setting.update_events, + UpdateEvents::Enabled + ) || matches!( + self.config.store_setting.update_events, + UpdateEvents::FriendsOnly | UpdateEvents::EmitFriendsOnly + ) && is_friend) && (!is_blocked && !is_blocked_by); log::trace!("Including cid in push: {include_pictures}"); @@ -971,6 +869,7 @@ impl IdentityStore { } #[tracing::instrument(skip(self, message))] + #[allow(clippy::if_same_then_else)] async fn process_message(&mut self, in_did: DID, message: &[u8]) -> anyhow::Result<()> { let pk_did = self.get_keypair_did()?; @@ -1004,7 +903,7 @@ impl IdentityStore { // Validate after making sure the identity did matches the payload identity.verify()?; - if let Some(own_id) = self.identity.read().await.clone() { + if let Ok(own_id) = self.own_identity().await { anyhow::ensure!( own_id.did_key() != identity.did, "Cannot accept own identity" @@ -1017,177 +916,144 @@ impl IdentityStore { } } - let (old_cid, mut cache_documents) = match self.get_cache_cid().await { - Ok(cid) => match self - .get_local_dag::>(IpfsPath::from(cid)) - .await - { - Ok(doc) => (Some(cid), doc), - _ => (Some(cid), Default::default()), - }, - _ => (None, Default::default()), - }; + let previous_identity = self.identity_cache.get(&identity.did).await.ok(); - let document = cache_documents - .iter() - .find(|document| { - document.did == identity.did && document.short_id == identity.short_id - }) - .cloned(); + self.identity_cache.insert(&identity).await?; - match document { + match previous_identity { Some(document) => { if document.different(&identity) { log::info!("Updating local cache of {}", identity.did); - let document_did = identity.did.clone(); - cache_documents.replace(identity.clone()); - let new_cid = cache_documents.to_cid(&self.ipfs).await?; + let document_did = identity.did.clone(); - 
self.ipfs.insert_pin(&new_cid, false).await?; - self.save_cache_cid(new_cid).await?; - if let Some(old_cid) = old_cid { - if self.ipfs.is_pinned(&old_cid).await? { - self.ipfs.remove_pin(&old_cid, false).await?; - } - // Do we want to remove the old block? - self.ipfs.remove_block(old_cid).await?; - } let mut emit = false; - if matches!(self.update_event, UpdateEvents::Enabled) { + if matches!( + self.config.store_setting.update_events, + UpdateEvents::Enabled + ) { emit = true; } else if matches!( - self.update_event, + self.config.store_setting.update_events, UpdateEvents::FriendsOnly | UpdateEvents::EmitFriendsOnly - ) { - if let Ok(store) = self.friend_store().await { - if store.is_friend(&document_did).await.unwrap_or_default() { - emit = true; - } - } + ) && self.is_friend(&document_did).await.unwrap_or_default() + { + emit = true; } - tokio::spawn({ - let store = self.clone(); - async move { - if document.profile_picture != identity.profile_picture - && identity.profile_picture.is_some() + + if document.profile_picture != identity.profile_picture + && identity.profile_picture.is_some() + { + log::info!("Requesting profile picture from {}", identity.did); + + if !self.config.store_setting.fetch_over_bitswap { + if let Err(e) = self + .request( + &in_did, + RequestOption::Image { + banner: None, + picture: identity.profile_picture, + }, + ) + .await { - log::info!( - "Requesting profile picture from {}", - identity.did + error!( + "Error requesting profile picture from {in_did}: {e}" ); - - if !store.fetch_over_bitswap.load(Ordering::Relaxed) { - if let Err(e) = store - .request( - &in_did, - RequestOption::Image { - banner: None, - picture: identity.profile_picture, - }, + } + } else { + let identity_profile_picture = + identity.profile_picture.expect("Cid is provided"); + tokio::spawn({ + let ipfs = self.ipfs.clone(); + let emit = emit; + let store = self.clone(); + let did = in_did.clone(); + async move { + let peer_id = vec![did.to_peer_id()?]; + + let mut stream = ipfs + .unixfs() + .cat( + identity_profile_picture, + None, + &peer_id, + false, + None, ) - .await - { - error!("Error requesting profile picture from {in_did}: {e}"); + .await? + .boxed(); + + while let Some(_d) = stream.try_next().await? {} + + if emit { + store.emit_event( + MultiPassEventKind::IdentityUpdate { did }, + ); } - } else { - let identity_profile_picture = - identity.profile_picture.expect("Cid is provided"); - tokio::spawn({ - let ipfs = store.ipfs.clone(); - let emit = emit; - let store = store.clone(); - let did = in_did.clone(); - async move { - let mut stream = ipfs - .unixfs() - .cat( - identity_profile_picture, - None, - &[], - false, - None, - ) - .await? 
- .boxed(); - while let Some(_d) = stream.next().await { - let _d = _d.map_err(anyhow::Error::from)?; - } - - if emit { - store.emit_event( - MultiPassEventKind::IdentityUpdate { - did, - }, - ); - } - - Ok::<_, anyhow::Error>(()) - } - }); + + Ok::<_, anyhow::Error>(()) } - } - if document.profile_banner != identity.profile_banner - && identity.profile_banner.is_some() + }); + } + } + if document.profile_banner != identity.profile_banner + && identity.profile_banner.is_some() + { + log::info!("Requesting profile banner from {}", identity.did); + + if !self.config.store_setting.fetch_over_bitswap { + if let Err(e) = self + .request( + &in_did, + RequestOption::Image { + banner: identity.profile_banner, + picture: None, + }, + ) + .await { - log::info!( - "Requesting profile banner from {}", - identity.did + error!( + "Error requesting profile banner from {in_did}: {e}" ); - - if !store.fetch_over_bitswap.load(Ordering::Relaxed) { - if let Err(e) = store - .request( - &in_did, - RequestOption::Image { - banner: identity.profile_banner, - picture: None, - }, + } + } else { + let identity_profile_banner = + identity.profile_banner.expect("Cid is provided"); + tokio::spawn({ + let ipfs = self.ipfs.clone(); + let emit = emit; + let did = in_did.clone(); + let store = self.clone(); + async move { + let peer_id = vec![did.to_peer_id()?]; + + let mut stream = ipfs + .unixfs() + .cat( + identity_profile_banner, + None, + &peer_id, + false, + None, ) - .await - { - error!("Error requesting profile banner from {in_did}: {e}"); + .await? + .boxed(); + + while let Some(_d) = stream.try_next().await? {} + + if emit { + store.emit_event( + MultiPassEventKind::IdentityUpdate { did }, + ); } - } else { - let identity_profile_banner = - identity.profile_banner.expect("Cid is provided"); - tokio::spawn({ - let ipfs = store.ipfs.clone(); - let emit = emit; - let did = in_did.clone(); - async move { - let mut stream = ipfs - .unixfs() - .cat( - identity_profile_banner, - None, - &[], - false, - None, - ) - .await? - .boxed(); - - while let Some(_d) = stream.next().await { - let _d = _d.map_err(anyhow::Error::from)?; - } - - if emit { - store.emit_event( - MultiPassEventKind::IdentityUpdate { - did, - }, - ); - } - - Ok::<_, anyhow::Error>(()) - } - }); + + Ok::<_, anyhow::Error>(()) } - } + }); } - }); + } if emit { log::trace!("Emitting identity update event"); @@ -1198,124 +1064,96 @@ impl IdentityStore { } } None => { - log::info!("Caching {} identity document", identity.did); - let document_did = identity.did.clone(); - cache_documents.insert(identity.clone()); + log::info!("{} identity document cached", identity.did); - let new_cid = cache_documents.to_cid(&self.ipfs).await?; - - self.ipfs.insert_pin(&new_cid, false).await?; - self.save_cache_cid(new_cid).await?; - if let Some(old_cid) = old_cid { - if self.ipfs.is_pinned(&old_cid).await? { - self.ipfs.remove_pin(&old_cid, false).await?; - } - // Do we want to remove the old block? 
- self.ipfs.remove_block(old_cid).await?; - } + let document_did = identity.did.clone(); - if matches!(self.update_event, UpdateEvents::Enabled) { + if matches!( + self.config.store_setting.update_events, + UpdateEvents::Enabled + ) { let did = document_did.clone(); self.emit_event(MultiPassEventKind::IdentityUpdate { did }); } let mut emit = false; - if matches!(self.update_event, UpdateEvents::Enabled) { + if matches!( + self.config.store_setting.update_events, + UpdateEvents::Enabled + ) { emit = true; } else if matches!( - self.update_event, + self.config.store_setting.update_events, UpdateEvents::FriendsOnly | UpdateEvents::EmitFriendsOnly - ) { - if let Ok(store) = self.friend_store().await { - if store.is_friend(&document_did).await.unwrap_or_default() { - emit = true; - } - } + ) && self.is_friend(&document_did).await.unwrap_or_default() + { + emit = true; } if emit { - tokio::spawn({ - let store = self.clone(); - async move { - let mut picture = None; - let mut banner = None; - - if let Some(cid) = identity.profile_picture { - picture = Some(cid); - } + let mut picture = None; + let mut banner = None; - if let Some(cid) = identity.profile_banner { - banner = Some(cid) - } + if let Some(cid) = identity.profile_picture { + picture = Some(cid); + } - if banner.is_some() || picture.is_some() { - if !store.fetch_over_bitswap.load(Ordering::Relaxed) { - store - .request( - &in_did, - RequestOption::Image { banner, picture }, - ) - .await?; - } else { - if let Some(picture) = picture { - tokio::spawn({ - let ipfs = store.ipfs.clone(); - let did = in_did.clone(); - let store = store.clone(); - async move { - let mut stream = ipfs - .unixfs() - .cat(picture, None, &[], false, None) - .await? - .boxed(); - - while let Some(_d) = stream.next().await { - let _d = - _d.map_err(anyhow::Error::from)?; - } - - store.emit_event( - MultiPassEventKind::IdentityUpdate { - did, - }, - ); - - Ok::<_, anyhow::Error>(()) - } - }); + if let Some(cid) = identity.profile_banner { + banner = Some(cid) + } + + if banner.is_some() || picture.is_some() { + if !self.config.store_setting.fetch_over_bitswap { + self.request(&in_did, RequestOption::Image { banner, picture }) + .await?; + } else { + if let Some(picture) = picture { + tokio::spawn({ + let ipfs = self.ipfs.clone(); + let did = in_did.clone(); + let store = self.clone(); + async move { + let mut stream = ipfs + .unixfs() + .cat(picture, None, &[], false, None) + .await? + .boxed(); + + while let Some(_d) = stream.try_next().await? {} + + store.emit_event( + MultiPassEventKind::IdentityUpdate { did }, + ); + + Ok::<_, anyhow::Error>(()) } - if let Some(banner) = banner { - tokio::spawn({ - let tx = store.event.clone(); - let ipfs = store.ipfs.clone(); - - let did = in_did.clone(); - async move { - let mut stream = ipfs - .unixfs() - .cat(banner, None, &[], false, None) - .await? - .boxed(); - while let Some(_d) = stream.next().await { - let _d = - _d.map_err(anyhow::Error::from)?; - } - let _ = tx.send( - MultiPassEventKind::IdentityUpdate { - did, - }, - ); - - Ok::<_, anyhow::Error>(()) - } - }); + }); + } + if let Some(banner) = banner { + tokio::spawn({ + let store = self.clone(); + let ipfs = self.ipfs.clone(); + + let did = in_did.clone(); + async move { + let mut stream = ipfs + .unixfs() + .cat(banner, None, &[], false, None) + .await? + .boxed(); + + while let Some(_d) = stream.try_next().await? 
{} + + store.emit_event( + MultiPassEventKind::IdentityUpdate { did }, + ); + + Ok::<_, anyhow::Error>(()) } - } + }); } - - Ok::<_, Error>(()) } - }); + } } } }; @@ -1324,33 +1162,28 @@ impl IdentityStore { IdentityEvent::Receive { option: ResponseOption::Image { cid, data }, } => { - let cache_documents = self.cache().await; - if let Some(cache) = cache_documents - .iter() - .find(|document| document.did == in_did) - { - if cache.profile_picture == Some(cid) || cache.profile_banner == Some(cid) { - tokio::spawn({ - let cid = cid; - let mut store = self.clone(); - async move { - let added_cid = store - .store_photo( - futures::stream::iter(Ok::<_, std::io::Error>(Ok(data))) - .boxed(), - Some(2 * 1024 * 1024), - ) - .await?; - - debug_assert_eq!(added_cid, cid); - let tx = store.event.clone(); - let _ = tx.send(MultiPassEventKind::IdentityUpdate { - did: in_did.clone(), - }); - Ok::<_, Error>(()) - } - }); - } + let cache = self.identity_cache.get(&in_did).await?; + + if cache.profile_picture == Some(cid) || cache.profile_banner == Some(cid) { + tokio::spawn({ + let cid = cid; + let mut store = self.clone(); + async move { + let added_cid = store + .store_photo( + futures::stream::iter(Ok::<_, std::io::Error>(Ok(data))) + .boxed(), + Some(2 * 1024 * 1024), + ) + .await?; + + debug_assert_eq!(added_cid, cid); + store.emit_event(MultiPassEventKind::IdentityUpdate { + did: in_did.clone(), + }); + Ok::<_, Error>(()) + } + }); } } }; @@ -1358,7 +1191,7 @@ impl IdentityStore { } fn own_platform(&self) -> Platform { - if self.share_platform.load(Ordering::Relaxed) { + if self.config.store_setting.share_platform { if cfg!(any( target_os = "windows", target_os = "macos", @@ -1383,21 +1216,6 @@ impl IdentityStore { self.discovery.discovery_config() } - pub fn relays(&self) -> &[Multiaddr] { - &self.relay - } - - pub(crate) async fn cache(&self) -> HashSet { - let cid = self.cache_cid.read().await; - match *cid { - Some(cid) => self - .get_local_dag::>(IpfsPath::from(cid)) - .await - .unwrap_or_default(), - None => Default::default(), - } - } - #[tracing::instrument(skip(self, extracted))] pub async fn import_identity( &mut self, @@ -1407,26 +1225,19 @@ impl IdentityStore { let identity = extracted.identity.clone(); - let root_document = RootDocument::import(&self.ipfs, extracted).await?; - - let root_cid = root_document.to_cid(&self.ipfs).await?; + let document = RootDocument::import(&self.ipfs, extracted).await?; - self.ipfs.insert_pin(&root_cid, true).await?; + self.root_document.set(document).await?; - self.save_cid(root_cid).await?; - self.update_identity().await?; - self.enable_event(); + log::info!("Loading friends list into phonebook"); + if let Ok(friends) = self.friends_list().await { + let phonebook = self.phonebook(); - if let Ok(store) = self.friend_store().await { - log::info!("Loading friends list into phonebook"); - if let Ok(friends) = store.friends_list().await { - if let Some(phonebook) = store.phonebook() { - if let Err(_e) = phonebook.add_friend_list(friends).await { - error!("Error adding friends in phonebook: {_e}"); - } - } + if let Err(_e) = phonebook.add_friend_list(friends).await { + error!("Error adding friends in phonebook: {_e}"); } } + Ok(identity) } @@ -1467,41 +1278,33 @@ impl IdentityStore { let ident_cid = identity.to_cid(&self.ipfs).await?; - let mut root_document = RootDocument { + let root_document = RootDocument { identity: ident_cid, ..Default::default() }; - root_document.sign(&did_kp)?; - - let root_cid = root_document.to_cid(&self.ipfs).await?; - - // Pin 
the dag - self.ipfs.insert_pin(&root_cid, true).await?; + self.root_document.set(root_document).await?; let identity = identity.resolve()?; - self.save_cid(root_cid).await?; - self.update_identity().await?; - self.enable_event(); Ok(identity) } pub async fn local_id_created(&self) -> bool { - self.identity.read().await.is_some() + self.own_identity().await.is_ok() } - //Note: We are calling `IdentityStore::cache` multiple times, but shouldnt have any impact on performance. + pub(crate) fn root_document(&self) -> &RootDocumentMap { + &self.root_document + } + + //Note: We are calling `IdentityStore::cache` multiple times, but shouldnt have any impact on performance. pub async fn lookup(&self, lookup: LookupBy) -> Result, Error> { let own_did = self - .identity - .read() + .own_identity() .await - .clone() .map(|identity| identity.did_key()) - .ok_or_else(|| { - Error::OtherWithContext("Identity store may not be initialized".into()) - })?; + .map_err(|_| Error::OtherWithContext("Identity store may not be initialized".into()))?; let mut preidentity = vec![]; @@ -1513,19 +1316,14 @@ impl IdentityStore { if *pubkey == own_did { return self.own_identity().await.map(|i| vec![i]); } - tokio::spawn({ - let discovery = self.discovery.clone(); - let pubkey = pubkey.clone(); - async move { - if !discovery.contains(&pubkey).await { - discovery.insert(&pubkey).await?; - } - Ok::<_, Error>(()) - } - }); - self.cache() - .await + if !self.discovery.contains(pubkey).await { + self.discovery.insert(pubkey).await?; + } + + self.identity_cache + .list() + .await? .iter() .filter(|ident| ident.did == *pubkey) .cloned() @@ -1533,21 +1331,15 @@ impl IdentityStore { } LookupBy::DidKeys(list) => { let mut items = HashSet::new(); - let cache = self.cache().await; - - tokio::spawn({ - let discovery = self.discovery.clone(); - let list = list.clone(); - let own_did = own_did.clone(); - async move { - for pubkey in list { - if !pubkey.eq(&own_did) && !discovery.contains(&pubkey).await { - discovery.insert(&pubkey).await?; - } + let cache = self.identity_cache.list().await?; + + for pubkey in list { + if !pubkey.eq(&own_did) && !self.discovery.contains(pubkey).await { + if let Err(e) = self.discovery.insert(pubkey).await { + log::error!("Error inserting {pubkey} into discovery: {e}") } - Ok::<_, Error>(()) } - }); + } for pubkey in list { if own_did.eq(pubkey) { @@ -1568,7 +1360,7 @@ impl IdentityStore { Vec::from_iter(items) } LookupBy::Username(username) if username.contains('#') => { - let cache = self.cache().await; + let cache = self.identity_cache.list().await?; let split_data = username.split('#').collect::>(); if split_data.len() != 2 { @@ -1603,16 +1395,18 @@ impl IdentityStore { } LookupBy::Username(username) => { let username = username.to_lowercase(); - self.cache() - .await + self.identity_cache + .list() + .await? .iter() .filter(|ident| ident.username.to_lowercase().contains(&username)) .cloned() .collect::>() } LookupBy::ShortId(id) => self - .cache() - .await + .identity_cache + .list() + .await? 
.iter() .filter(|ident| String::from_utf8_lossy(&ident.short_id).eq(id)) .cloned() @@ -1634,25 +1428,29 @@ impl IdentityStore { let identity = identity.sign(&kp)?; - let mut root_document = self.get_root_document().await?; + log::debug!("Updateing document"); + let mut root_document = self.root_document.get().await?; let ident_cid = identity.to_cid(&self.ipfs).await?; root_document.identity = ident_cid; - self.set_root_document(root_document).await + self.root_document + .set(root_document) + .await + .map(|_| log::debug!("Root document updated")) + .map_err(|e| { + log::error!("Updating root document failed: {e}"); + e + }) } //TODO: Add a check to check directly through pubsub_peer (maybe even using connected peers) or through a separate server #[tracing::instrument(skip(self))] pub async fn identity_status(&self, did: &DID) -> Result { let own_did = self - .identity - .read() + .own_identity() .await - .clone() .map(|identity| identity.did_key()) - .ok_or_else(|| { - Error::OtherWithContext("Identity store may not be initialized".into()) - })?; + .map_err(|_| Error::OtherWithContext("Identity store may not be initialized".into()))?; if own_did.eq(did) { return self @@ -1685,10 +1483,10 @@ impl IdentityStore { return Ok(status); } - self.cache() + self.identity_cache + .get(did) .await - .iter() - .find(|cache| cache.did.eq(did)) + .ok() .and_then(|cache| cache.status) .or(Some(status)) .ok_or(Error::IdentityDoesntExist) @@ -1696,9 +1494,9 @@ impl IdentityStore { #[tracing::instrument(skip(self))] pub async fn set_identity_status(&mut self, status: IdentityStatus) -> Result<(), Error> { - let mut root_document = self.get_root_document().await?; + let mut root_document = self.root_document.get().await?; root_document.status = Some(status); - self.set_root_document(root_document).await?; + self.root_document.set(root_document).await?; *self.online_status.write().await = Some(status); self.push_to_all().await; Ok(()) @@ -1707,14 +1505,10 @@ impl IdentityStore { #[tracing::instrument(skip(self))] pub async fn identity_platform(&self, did: &DID) -> Result { let own_did = self - .identity - .read() + .own_identity() .await - .clone() .map(|identity| identity.did_key()) - .ok_or_else(|| { - Error::OtherWithContext("Identity store may not be initialized".into()) - })?; + .map_err(|_| Error::OtherWithContext("Identity store may not be initialized".into()))?; if own_did.eq(did) { return Ok(self.own_platform()); @@ -1726,10 +1520,10 @@ impl IdentityStore { return Ok(Platform::Unknown); } - self.cache() + self.identity_cache + .get(did) .await - .iter() - .find(|cache| cache.did.eq(did)) + .ok() .and_then(|cache| cache.platform) .ok_or(Error::IdentityDoesntExist) } @@ -1759,146 +1553,20 @@ impl IdentityStore { .map_err(anyhow::Error::from) } - pub async fn get_root_document(&self) -> Result { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::Get(tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn set_root_document(&mut self, document: RootDocument) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::Set(document, tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? 
- } - - pub async fn root_document_add_friend(&self, did: &DID) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::AddFriend(did.clone(), tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_remove_friend(&self, did: &DID) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::RemoveFriend(did.clone(), tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_add_block(&self, did: &DID) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::AddBlock(did.clone(), tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_remove_block(&self, did: &DID) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::RemoveBlock(did.clone(), tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_add_block_by(&self, did: &DID) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::AddBlockBy(did.clone(), tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_remove_block_by(&self, did: &DID) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::RemoveBlockBy(did.clone(), tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_add_request(&self, did: &Request) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::AddRequest(did.clone(), tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_remove_request(&self, did: &Request) -> Result<(), Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::RemoveRequest(did.clone(), tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_get_friends(&self) -> Result, Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::GetFriendList(tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_get_requests(&self) -> Result, Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::GetRequestList(tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? 
- } - - pub async fn root_document_get_blocks(&self) -> Result, Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::GetBlockList(tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - - pub async fn root_document_get_block_by(&self) -> Result, Error> { - let task_tx = self.task_send.read().await.clone().ok_or(Error::Other)?; - let (tx, rx) = oneshot::channel(); - task_tx - .unbounded_send(RootDocumentEvents::GetBlockByList(tx)) - .map_err(anyhow::Error::from)?; - rx.await.map_err(anyhow::Error::from)? - } - pub async fn get_local_dag(&self, path: IpfsPath) -> Result { path.get_local_dag(&self.ipfs).await } pub async fn own_identity_document(&self) -> Result { - let root_document = self.get_root_document().await?; + let root_document = self.root_document.get().await?; let path = IpfsPath::from(root_document.identity); - let identity = self.get_local_dag::(path).await?; + let identity: IdentityDocument = path.get_local_dag(&self.ipfs).await?; identity.verify()?; Ok(identity) } pub async fn own_identity(&self) -> Result { - let root_document = self.get_root_document().await?; + let root_document = self.root_document.get().await?; let path = IpfsPath::from(root_document.identity); let identity = self @@ -1917,25 +1585,6 @@ impl IdentityStore { Ok(identity) } - pub async fn save_cid(&self, cid: Cid) -> Result<(), Error> { - *self.root_cid.write().await = Some(cid); - if let Some(path) = self.path.as_ref() { - let cid = cid.to_string(); - tokio::fs::write(path.join(".id"), cid).await?; - } - Ok(()) - } - - pub async fn save_cache_cid(&self, cid: Cid) -> Result<(), Error> { - log::trace!("Updating cache"); - *self.cache_cid.write().await = Some(cid); - if let Some(path) = self.path.as_ref() { - let cid = cid.to_string(); - tokio::fs::write(path.join(".cache_id"), cid).await?; - } - Ok(()) - } - #[tracing::instrument(skip(self, stream))] pub async fn store_photo( &mut self, @@ -1946,48 +1595,54 @@ impl IdentityStore { let mut stream = ipfs.add_unixfs(stream).await?; - let mut ipfs_path = None; - - while let Some(status) = stream.next().await { - match status { - ipfs::unixfs::UnixfsStatus::ProgressStatus { written, .. } => { + let cid = futures::future::poll_fn(|cx| loop { + match stream.poll_next_unpin(cx) { + Poll::Ready(Some(ipfs::unixfs::UnixfsStatus::ProgressStatus { + written, .. + })) => { if let Some(limit) = limit { - if written >= limit { - return Err(Error::InvalidLength { + if written > limit { + return Poll::Ready(Err(Error::InvalidLength { context: "photo".into(), current: written, minimum: Some(1), maximum: Some(limit), - }); + })); } } log::trace!("{written} bytes written"); } - ipfs::unixfs::UnixfsStatus::CompletedStatus { path, written, .. } => { - log::debug!("Image is written with {written} bytes"); - ipfs_path = Some(path); + Poll::Ready(Some(ipfs::unixfs::UnixfsStatus::CompletedStatus { + path, + written, + .. + })) => { + log::debug!("Image is written with {written} bytes - stored at {path}"); + return Poll::Ready(path.root().cid().copied().ok_or(Error::Other)); } - ipfs::unixfs::UnixfsStatus::FailedStatus { written, error, .. } => { - match error { + Poll::Ready(Some(ipfs::unixfs::UnixfsStatus::FailedStatus { + written, + error, + .. 
+ })) => { + let err = match error { Some(e) => { log::error!("Error uploading picture with {written} bytes written with error: {e}"); - return Err(Error::from(e)); - } + e.into() + }, None => { log::error!("Error uploading picture with {written} bytes written"); - return Err(Error::OtherWithContext("Error uploading photo".into())); + Error::OtherWithContext("Error uploading photo".into()) } - } + }; + + return Poll::Ready(Err(err)); } + Poll::Ready(None) => return Poll::Ready(Err(Error::ReceiverChannelUnavailable)), + Poll::Pending => return Poll::Pending, } - } - - let cid = ipfs_path - .ok_or(Error::Other)? - .root() - .cid() - .copied() - .ok_or(Error::Other)?; + }) + .await?; if !ipfs.is_pinned(&cid).await? { ipfs.insert_pin(&cid, true).await?; @@ -1998,34 +1653,31 @@ impl IdentityStore { #[tracing::instrument(skip(self))] pub async fn identity_picture(&self, did: &DID) -> Result { - if self.disable_image { + if self.config.store_setting.disable_images { return Err(Error::InvalidIdentityPicture); } let document = match self.own_identity_document().await { Ok(document) if document.did.eq(did) => document, - Err(_) | Ok(_) => self - .cache() - .await - .iter() - .find(|ident| ident.did == *did) - .cloned() - .ok_or(Error::IdentityDoesntExist)?, + Err(_) | Ok(_) => self.identity_cache.get(did).await?, }; - let cb = self.default_pfp_callback.clone(); - if let Some(cid) = document.profile_picture { - if let Ok(data) = unixfs_fetch(&self.ipfs, cid, None, true, Some(2 * 1024 * 1024)).await + let data = match unixfs_fetch(&self.ipfs, cid, None, true, Some(2 * 1024 * 1024)).await { - let picture: String = serde_json::from_slice(&data).unwrap_or_default(); - if !picture.is_empty() { - return Ok(picture); + Ok(data) => data, + Err(_) => { + return Err(Error::InvalidIdentityPicture); } + }; + + let picture: String = serde_json::from_slice(&data)?; + if !picture.is_empty() { + return Ok(picture); } } - if let Some(cb) = cb { + if let Some(cb) = self.config.store_setting.default_profile_picture.as_deref() { let identity = document.resolve()?; let picture = cb(&identity)?; return Ok(String::from_utf8_lossy(&picture).to_string()); @@ -2036,34 +1688,31 @@ impl IdentityStore { #[tracing::instrument(skip(self))] pub async fn identity_banner(&self, did: &DID) -> Result { - if self.disable_image { + if self.config.store_setting.disable_images { return Err(Error::InvalidIdentityBanner); } let document = match self.own_identity_document().await { Ok(document) if document.did.eq(did) => document, - Err(_) | Ok(_) => self - .cache() - .await - .iter() - .find(|ident| ident.did == *did) - .cloned() - .ok_or(Error::IdentityDoesntExist)?, + Err(_) | Ok(_) => self.identity_cache.get(did).await?, }; - let cb = self.default_pfp_callback.clone(); - if let Some(cid) = document.profile_banner { - if let Ok(data) = unixfs_fetch(&self.ipfs, cid, None, true, Some(2 * 1024 * 1024)).await + let data = match unixfs_fetch(&self.ipfs, cid, None, true, Some(2 * 1024 * 1024)).await { - let picture: String = serde_json::from_slice(&data).unwrap_or_default(); - if !picture.is_empty() { - return Ok(picture); + Ok(data) => data, + Err(_) => { + return Err(Error::InvalidIdentityPicture); } + }; + + let picture: String = serde_json::from_slice(&data)?; + if !picture.is_empty() { + return Ok(picture); } } - if let Some(cb) = cb { + if let Some(cb) = self.config.store_setting.default_profile_picture.as_deref() { let identity = document.resolve()?; let picture = cb(&identity)?; return Ok(String::from_utf8_lossy(&picture).to_string()); 
@@ -2117,41 +1766,6 @@ impl IdentityStore { Ok(()) } - pub async fn load_cid(&self) -> Result<(), Error> { - if let Some(path) = self.path.as_ref() { - if let Ok(cid_str) = tokio::fs::read(path.join(".id")) - .await - .map(|bytes| String::from_utf8_lossy(&bytes).to_string()) - { - *self.root_cid.write().await = cid_str.parse().ok() - } - - if let Ok(cid_str) = tokio::fs::read(path.join(".cache_id")) - .await - .map(|bytes| String::from_utf8_lossy(&bytes).to_string()) - { - *self.cache_cid.write().await = cid_str.parse().ok(); - } - } - Ok(()) - } - - pub async fn get_cache_cid(&self) -> Result { - (self.cache_cid.read().await) - .ok_or_else(|| Error::OtherWithContext("Cache cannot be found".into())) - } - - pub async fn get_root_cid(&self) -> Result { - (self.root_cid.read().await).ok_or(Error::IdentityDoesntExist) - } - - pub async fn update_identity(&self) -> Result<(), Error> { - let ident = self.own_identity().await?; - self.validate_identity(&ident)?; - *self.identity.write().await = Some(ident); - Ok(()) - } - pub fn validate_identity(&self, identity: &Identity) -> Result<(), Error> { { let len = identity.username().chars().count(); @@ -2205,21 +1819,489 @@ impl IdentityStore { Ok(()) } - pub fn enable_event(&mut self) { - self.start_event.store(true, Ordering::SeqCst); + pub fn clear_internal_cache(&mut self) {} + + pub fn emit_event(&self, event: MultiPassEventKind) { + let _ = self.event.send(event); + } +} + +impl IdentityStore { + #[tracing::instrument(skip(self))] + pub async fn send_request(&mut self, pubkey: &DID) -> Result<(), Error> { + let local_public_key = (*self.did_key).clone(); + + if local_public_key.eq(pubkey) { + return Err(Error::CannotSendSelfFriendRequest); + } + + if self.is_friend(pubkey).await? { + return Err(Error::FriendExist); + } + + if self.is_blocked_by(pubkey).await? { + return Err(Error::BlockedByUser); + } + + if self.is_blocked(pubkey).await? { + return Err(Error::PublicKeyIsBlocked); + } + + if self.has_request_from(pubkey).await? { + return self.accept_request(pubkey).await; + } + + let list = self.list_all_raw_request().await?; + + if list + .iter() + .any(|request| request.r#type() == RequestType::Outgoing && request.did().eq(pubkey)) + { + // since the request has already been sent, we should not be sending it again + return Err(Error::FriendRequestExist); + } + + let payload = RequestResponsePayload { + sender: local_public_key, + event: Event::Request, + }; + + self.broadcast_request((pubkey, &payload), true, true).await } - pub fn disable_event(&mut self) { - self.start_event.store(false, Ordering::SeqCst); + #[tracing::instrument(skip(self))] + pub async fn accept_request(&mut self, pubkey: &DID) -> Result<(), Error> { + let local_public_key = (*self.did_key).clone(); + + if local_public_key.eq(pubkey) { + return Err(Error::CannotAcceptSelfAsFriend); + } + + if !self.has_request_from(pubkey).await? { + return Err(Error::FriendRequestDoesntExist); + } + + let list = self.list_all_raw_request().await?; + + let internal_request = list + .iter() + .find(|request| request.r#type() == RequestType::Incoming && request.did().eq(pubkey)) + .ok_or(Error::CannotFindFriendRequest)?; + + if self.is_friend(pubkey).await? { + warn!("Already friends. 
Removing request"); + + self.root_document.remove_request(internal_request).await?; + + return Ok(()); + } + + let payload = RequestResponsePayload { + event: Event::Accept, + sender: local_public_key, + }; + + self.add_friend(pubkey).await?; + + self.root_document.remove_request(internal_request).await?; + + self.broadcast_request((pubkey, &payload), false, true) + .await } - pub fn end_event(&mut self) { - self.end_event.store(true, Ordering::SeqCst); + #[tracing::instrument(skip(self))] + pub async fn reject_request(&mut self, pubkey: &DID) -> Result<(), Error> { + let local_public_key = (*self.did_key).clone(); + + if local_public_key.eq(pubkey) { + return Err(Error::CannotDenySelfAsFriend); + } + + if !self.has_request_from(pubkey).await? { + return Err(Error::FriendRequestDoesntExist); + } + + let list = self.list_all_raw_request().await?; + + // Although the request been validated before storing, we should validate again just to be safe + let internal_request = list + .iter() + .find(|request| request.r#type() == RequestType::Incoming && request.did().eq(pubkey)) + .ok_or(Error::CannotFindFriendRequest)?; + + let payload = RequestResponsePayload { + sender: local_public_key, + event: Event::Reject, + }; + + self.root_document.remove_request(internal_request).await?; + + self.broadcast_request((pubkey, &payload), false, true) + .await } - pub fn clear_internal_cache(&mut self) {} + #[tracing::instrument(skip(self))] + pub async fn close_request(&mut self, pubkey: &DID) -> Result<(), Error> { + let local_public_key = (*self.did_key).clone(); - pub fn emit_event(&self, event: MultiPassEventKind) { - let _ = self.event.send(event); + let list = self.list_all_raw_request().await?; + + let internal_request = list + .iter() + .find(|request| request.r#type() == RequestType::Outgoing && request.did().eq(pubkey)) + .ok_or(Error::CannotFindFriendRequest)?; + + let payload = RequestResponsePayload { + sender: local_public_key, + event: Event::Retract, + }; + + self.root_document.remove_request(internal_request).await?; + + if let Some(entry) = self.queue.get(pubkey).await { + if entry.event == Event::Request { + self.queue.remove(pubkey).await; + self.emit_event(MultiPassEventKind::OutgoingFriendRequestClosed { + did: pubkey.clone(), + }); + + return Ok(()); + } + } + + self.broadcast_request((pubkey, &payload), false, true) + .await + } + + #[tracing::instrument(skip(self))] + pub async fn has_request_from(&self, pubkey: &DID) -> Result { + self.list_incoming_request() + .await + .map(|list| list.contains(pubkey)) + } +} + +impl IdentityStore { + #[tracing::instrument(skip(self))] + pub async fn block_list(&self) -> Result, Error> { + self.root_document.get_blocks().await + } + + #[tracing::instrument(skip(self))] + pub async fn is_blocked(&self, public_key: &DID) -> Result { + self.block_list() + .await + .map(|list| list.contains(public_key)) + } + + #[tracing::instrument(skip(self))] + pub async fn block(&mut self, pubkey: &DID) -> Result<(), Error> { + let local_public_key = (*self.did_key).clone(); + + if local_public_key.eq(pubkey) { + return Err(Error::CannotBlockOwnKey); + } + + if self.is_blocked(pubkey).await? 
{ + return Err(Error::PublicKeyIsBlocked); + } + + self.root_document.add_block(pubkey).await?; + + // Remove anything from queue related to the key + self.queue.remove(pubkey).await; + + let list = self.list_all_raw_request().await?; + for req in list.iter().filter(|req| req.did().eq(pubkey)) { + self.root_document.remove_request(req).await?; + } + + if self.is_friend(pubkey).await? { + if let Err(e) = self.remove_friend(pubkey, false).await { + error!("Error removing item from friend list: {e}"); + } + } + + // Since we want to broadcast the remove request, banning the peer after would not allow that to happen + // Although this may get uncomment in the future to block connections regardless if its sent or not, or + // if we decide to send the request through a relay to broadcast it to the peer, however + // the moment this extension is reloaded the block list are considered as a "banned peer" in libp2p + + // let peer_id = did_to_libp2p_pub(pubkey)?.to_peer_id(); + + // self.ipfs.ban_peer(peer_id).await?; + let payload = RequestResponsePayload { + sender: local_public_key, + event: Event::Block, + }; + + self.broadcast_request((pubkey, &payload), false, true) + .await + } + + #[tracing::instrument(skip(self))] + pub async fn unblock(&mut self, pubkey: &DID) -> Result<(), Error> { + let local_public_key = (*self.did_key).clone(); + + if local_public_key.eq(pubkey) { + return Err(Error::CannotUnblockOwnKey); + } + + if !self.is_blocked(pubkey).await? { + return Err(Error::PublicKeyIsntBlocked); + } + + self.root_document.remove_block(pubkey).await?; + + let peer_id = did_to_libp2p_pub(pubkey)?.to_peer_id(); + self.ipfs.unban_peer(peer_id).await?; + + let payload = RequestResponsePayload { + sender: local_public_key, + event: Event::Unblock, + }; + + self.broadcast_request((pubkey, &payload), false, true) + .await + } +} + +impl IdentityStore { + pub async fn block_by_list(&self) -> Result, Error> { + self.root_document.get_block_by().await + } + + pub async fn is_blocked_by(&self, pubkey: &DID) -> Result { + self.block_by_list().await.map(|list| list.contains(pubkey)) + } +} + +impl IdentityStore { + pub async fn friends_list(&self) -> Result, Error> { + self.root_document.get_friends().await + } + + // Should not be called directly but only after a request is accepted + #[tracing::instrument(skip(self))] + pub async fn add_friend(&mut self, pubkey: &DID) -> Result<(), Error> { + if self.is_friend(pubkey).await? { + return Err(Error::FriendExist); + } + + if self.is_blocked(pubkey).await? { + return Err(Error::PublicKeyIsBlocked); + } + + self.root_document.add_friend(pubkey).await?; + + let phonebook = self.phonebook(); + if let Err(_e) = phonebook.add_friend(pubkey).await { + error!("Error: {_e}"); + } + + // Push to give an update in the event any wasnt transmitted during the initial push + // We dont care if this errors or not. + let _ = self.push(pubkey).await.ok(); + + self.emit_event(MultiPassEventKind::FriendAdded { + did: pubkey.clone(), + }); + + Ok(()) + } + + #[tracing::instrument(skip(self, broadcast))] + pub async fn remove_friend(&mut self, pubkey: &DID, broadcast: bool) -> Result<(), Error> { + if !self.is_friend(pubkey).await? 
{ + return Err(Error::FriendDoesntExist); + } + + self.root_document.remove_friend(pubkey).await?; + + let phonebook = self.phonebook(); + + if let Err(_e) = phonebook.remove_friend(pubkey).await { + error!("Error: {_e}"); + } + + if broadcast { + let local_public_key = (*self.did_key).clone(); + + let payload = RequestResponsePayload { + sender: local_public_key, + event: Event::Remove, + }; + + self.broadcast_request((pubkey, &payload), false, true) + .await?; + } + + self.emit_event(MultiPassEventKind::FriendRemoved { + did: pubkey.clone(), + }); + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub async fn is_friend(&self, pubkey: &DID) -> Result { + self.friends_list().await.map(|list| list.contains(pubkey)) + } +} + +impl IdentityStore { + pub async fn list_all_raw_request(&self) -> Result, Error> { + self.root_document.get_requests().await + } + + pub async fn received_friend_request_from(&self, did: &DID) -> Result { + self.list_incoming_request() + .await + .map(|list| list.iter().any(|request| request.eq(did))) + } + + #[tracing::instrument(skip(self))] + pub async fn list_incoming_request(&self) -> Result, Error> { + self.list_all_raw_request().await.map(|list| { + list.iter() + .filter_map(|request| match request { + Request::In(request) => Some(request), + _ => None, + }) + .cloned() + .collect::>() + }) + } + + #[tracing::instrument(skip(self))] + pub async fn sent_friend_request_to(&self, did: &DID) -> Result { + self.list_outgoing_request() + .await + .map(|list| list.iter().any(|request| request.eq(did))) + } + + #[tracing::instrument(skip(self))] + pub async fn list_outgoing_request(&self) -> Result, Error> { + self.list_all_raw_request().await.map(|list| { + list.iter() + .filter_map(|request| match request { + Request::Out(request) => Some(request), + _ => None, + }) + .cloned() + .collect::>() + }) + } + + #[tracing::instrument(skip(self))] + pub async fn broadcast_request( + &mut self, + (recipient, payload): (&DID, &RequestResponsePayload), + store_request: bool, + queue_broadcast: bool, + ) -> Result<(), Error> { + let remote_peer_id = did_to_libp2p_pub(recipient)?.to_peer_id(); + + if !self.discovery.contains(recipient).await { + self.discovery.insert(recipient).await?; + } + + if store_request { + let outgoing_request = Request::Out(recipient.clone()); + let list = self.list_all_raw_request().await?; + if !list.contains(&outgoing_request) { + self.root_document.add_request(&outgoing_request).await?; + } + } + + let kp = &*self.did_key; + + let payload_bytes = serde_json::to_vec(&payload)?; + + let bytes = ecdh_encrypt(kp, Some(recipient), payload_bytes)?; + + log::trace!("Request Payload size: {} bytes", bytes.len()); + + log::info!("Sending event to {recipient}"); + + let peers = self.ipfs.pubsub_peers(Some(recipient.inbox())).await?; + + let mut queued = false; + + let wait = self.wait_on_response.is_some(); + + let mut rx = (matches!(payload.event, Event::Request) && wait).then_some({ + let (tx, rx) = oneshot::channel(); + self.signal.write().await.insert(recipient.clone(), tx); + rx + }); + + let start = Instant::now(); + if !peers.contains(&remote_peer_id) + || (peers.contains(&remote_peer_id) + && self + .ipfs + .pubsub_publish(recipient.inbox(), bytes) + .await + .is_err()) + && queue_broadcast + { + self.queue.insert(recipient, payload.clone()).await; + queued = true; + self.signal.write().await.remove(recipient); + } + + if !queued { + let end = start.elapsed(); + log::trace!("Took {}ms to send event", end.as_millis()); + } + + if !queued && 
matches!(payload.event, Event::Request) { + if let Some(rx) = std::mem::take(&mut rx) { + if let Some(timeout) = self.wait_on_response { + let start = Instant::now(); + if let Ok(Ok(res)) = tokio::time::timeout(timeout, rx).await { + let end = start.elapsed(); + log::trace!("Took {}ms to receive a response", end.as_millis()); + res? + } + } + } + } + + match payload.event { + Event::Request => { + self.emit_event(MultiPassEventKind::FriendRequestSent { + to: recipient.clone(), + }); + } + Event::Retract => { + self.emit_event(MultiPassEventKind::OutgoingFriendRequestClosed { + did: recipient.clone(), + }); + } + Event::Reject => { + self.emit_event(MultiPassEventKind::IncomingFriendRequestRejected { + did: recipient.clone(), + }); + } + Event::Block => { + let _ = self.push(recipient).await; + let _ = self.request(recipient, RequestOption::Identity).await; + self.emit_event(MultiPassEventKind::Blocked { + did: recipient.clone(), + }); + } + Event::Unblock => { + let _ = self.push(recipient).await; + let _ = self.request(recipient, RequestOption::Identity).await; + + self.emit_event(MultiPassEventKind::Unblocked { + did: recipient.clone(), + }); + } + _ => {} + }; + Ok(()) } } diff --git a/extensions/warp-ipfs/src/store/keystore.rs b/extensions/warp-ipfs/src/store/keystore.rs index 00f7ba443..d05667cb0 100644 --- a/extensions/warp-ipfs/src/store/keystore.rs +++ b/extensions/warp-ipfs/src/store/keystore.rs @@ -64,11 +64,10 @@ impl Keystore { pub fn get_latest(&self, did: &DID, recipient: &DID) -> Result, Error> { self.recipient_key .get(recipient) - .map(|list| { + .and_then(|list| { list.last() .and_then(|entry| super::ecdh_decrypt(did, None, entry).ok()) }) - .and_then(|entry| entry) .ok_or(Error::PublicKeyInvalid) } diff --git a/extensions/warp-ipfs/src/store/message.rs b/extensions/warp-ipfs/src/store/message.rs index a6745129f..74606c682 100644 --- a/extensions/warp-ipfs/src/store/message.rs +++ b/extensions/warp-ipfs/src/store/message.rs @@ -42,7 +42,6 @@ use crate::store::{ use super::conversation::{ConversationDocument, MessageDocument}; use super::discovery::Discovery; use super::document::utils::{GetLocalDag, ToCid}; -use super::friends::FriendsStore; use super::identity::IdentityStore; use super::keystore::Keystore; use super::{did_to_libp2p_pub, verify_serde_sig, ConversationEvents, MessagingEvents}; @@ -81,7 +80,7 @@ pub struct MessageStore { identity: IdentityStore, // friend store - friends: FriendsStore, + // friends: FriendsStore, // discovery discovery: Discovery, @@ -107,8 +106,6 @@ pub struct MessageStore { spam_filter: Arc>, with_friends: Arc, - - disable_sender_event_emit: Arc, } #[allow(clippy::too_many_arguments)] @@ -117,18 +114,13 @@ impl MessageStore { ipfs: Ipfs, path: Option, identity: IdentityStore, - friends: FriendsStore, + // friends: FriendsStore, discovery: Discovery, filesystem: Option>, _: bool, interval_ms: u64, event: BroadcastSender, - (check_spam, disable_sender_event_emit, with_friends, conversation_load_task): ( - bool, - bool, - bool, - bool, - ), + (check_spam, with_friends): (bool, bool), ) -> anyhow::Result { info!("Initializing MessageStore"); @@ -144,7 +136,6 @@ impl MessageStore { let spam_filter = Arc::new(check_spam.then_some(SpamFilter::default()?)); let stream_task = Arc::new(Default::default()); let stream_event_task = Arc::new(Default::default()); - let disable_sender_event_emit = Arc::new(AtomicBool::new(disable_sender_event_emit)); let with_friends = Arc::new(AtomicBool::new(with_friends)); let stream_sender = 
Arc::new(Default::default()); let conversation_lock = Arc::new(Default::default()); @@ -166,21 +157,20 @@ impl MessageStore { conversation_sender, conversation_task_tx, identity, - friends, + // friends, discovery, filesystem, queue, did, event, spam_filter, - disable_sender_event_emit, with_friends, conversation_keystore_cid, stream_conversation_task, }; info!("Loading existing conversations task"); - if let Err(_e) = store.load_conversations(conversation_load_task).await {} + if let Err(_e) = store.load_conversations().await {} tokio::spawn({ let mut store = store.clone(); @@ -1414,7 +1404,7 @@ impl MessageStore { return Ok(()); } - if let Ok(true) = self.friends.is_blocked(&recipient).await { + if let Ok(true) = self.identity.is_blocked(&recipient).await { //TODO: Signal back to close conversation warn!("{recipient} is blocked"); return Ok(()); @@ -1781,11 +1771,11 @@ impl MessageStore { impl MessageStore { pub async fn create_conversation(&mut self, did_key: &DID) -> Result { - if self.with_friends.load(Ordering::SeqCst) && !self.friends.is_friend(did_key).await? { + if self.with_friends.load(Ordering::SeqCst) && !self.identity.is_friend(did_key).await? { return Err(Error::FriendDoesntExist); } - if let Ok(true) = self.friends.is_blocked(did_key).await { + if let Ok(true) = self.identity.is_blocked(did_key).await { return Err(Error::PublicKeyIsBlocked); } @@ -1872,12 +1862,10 @@ impl MessageStore { } } - if !self.disable_sender_event_emit.load(Ordering::Relaxed) { - if let Err(e) = self.event.send(RayGunEventKind::ConversationCreated { - conversation_id: conversation.id(), - }) { - error!("Error broadcasting event: {e}"); - } + if let Err(e) = self.event.send(RayGunEventKind::ConversationCreated { + conversation_id: conversation.id(), + }) { + error!("Error broadcasting event: {e}"); } if let Some(path) = self.path.as_ref() { @@ -1918,12 +1906,12 @@ impl MessageStore { let mut removal = vec![]; for did in recipients.iter() { - if self.with_friends.load(Ordering::SeqCst) && !self.friends.is_friend(did).await? { + if self.with_friends.load(Ordering::SeqCst) && !self.identity.is_friend(did).await? { info!("{did} is not on the friends list.. removing from list"); removal.push(did.clone()); } - if let Ok(true) = self.friends.is_blocked(did).await { + if let Ok(true) = self.identity.is_blocked(did).await { info!("{did} is blocked.. removing from list"); removal.push(did.clone()); } @@ -2021,12 +2009,10 @@ impl MessageStore { } } - if !self.disable_sender_event_emit.load(Ordering::Relaxed) { - if let Err(e) = self.event.send(RayGunEventKind::ConversationCreated { - conversation_id: conversation.id(), - }) { - error!("Error broadcasting event: {e}"); - } + if let Err(e) = self.event.send(RayGunEventKind::ConversationCreated { + conversation_id: conversation.id(), + }) { + error!("Error broadcasting event: {e}"); } tokio::spawn({ @@ -2218,7 +2204,7 @@ impl MessageStore { .map_err(Error::from) } - pub async fn load_conversations(&self, background: bool) -> Result<(), Error> { + pub async fn load_conversations(&self) -> Result<(), Error> { let Some(path) = self.path.as_ref() else { return Ok(()); }; @@ -2290,9 +2276,7 @@ impl MessageStore { } }; - if background { - tokio::spawn(task); - } else if let Err(e) = task.await { + if let Err(e) = task.await { error!("Error loading conversation: {e}"); } } @@ -2736,7 +2720,7 @@ impl MessageStore { return Err(Error::PublicKeyInvalid); } - if self.friends.is_blocked(did_key).await? { + if self.identity.is_blocked(did_key).await? 
{ return Err(Error::PublicKeyIsBlocked); } diff --git a/extensions/warp-ipfs/src/store/mod.rs b/extensions/warp-ipfs/src/store/mod.rs index 258deb436..a696ba312 100644 --- a/extensions/warp-ipfs/src/store/mod.rs +++ b/extensions/warp-ipfs/src/store/mod.rs @@ -2,7 +2,6 @@ pub mod conversation; pub mod discovery; pub mod document; pub mod files; -pub mod friends; pub mod identity; pub mod keystore; pub mod message; diff --git a/extensions/warp-ipfs/src/store/phonebook.rs b/extensions/warp-ipfs/src/store/phonebook.rs index d90d8709b..43c0a6e04 100644 --- a/extensions/warp-ipfs/src/store/phonebook.rs +++ b/extensions/warp-ipfs/src/store/phonebook.rs @@ -1,12 +1,8 @@ use futures::channel::oneshot; use futures::SinkExt; -use rust_ipfs as ipfs; -use tokio::sync::broadcast; -use ipfs::Ipfs; use warp::crypto::DID; use warp::error::Error; -use warp::multipass::MultiPassEventKind; use crate::behaviour::phonebook::PhoneBookCommand; @@ -22,10 +18,7 @@ pub struct PhoneBook { impl PhoneBook { pub fn new( - _: Ipfs, discovery: Discovery, - _: broadcast::Sender, - _: bool, pb_tx: futures::channel::mpsc::Sender, ) -> Self { PhoneBook { discovery, pb_tx } diff --git a/extensions/warp-ipfs/src/store/queue.rs b/extensions/warp-ipfs/src/store/queue.rs index 400e210ac..c113a2a9a 100644 --- a/extensions/warp-ipfs/src/store/queue.rs +++ b/extensions/warp-ipfs/src/store/queue.rs @@ -21,7 +21,7 @@ use warp::{ use crate::store::{ecdh_encrypt, PeerIdExt, PeerTopic}; -use super::{connected_to_peer, discovery::Discovery, friends::RequestResponsePayload}; +use super::{connected_to_peer, discovery::Discovery, identity::RequestResponsePayload}; pub struct Queue { path: Option, diff --git a/extensions/warp-ipfs/tests/accounts.rs b/extensions/warp-ipfs/tests/accounts.rs index 19cf2312c..b3291593b 100644 --- a/extensions/warp-ipfs/tests/accounts.rs +++ b/extensions/warp-ipfs/tests/accounts.rs @@ -269,6 +269,44 @@ mod test { Ok(()) } + #[tokio::test] + async fn identity_profile_picture() -> anyhow::Result<()> { + let (mut account, did, _) = create_account( + Some("JohnDoe"), + None, + Some("test::identity_profile_picture".into()), + ) + .await?; + + account + .update_identity(IdentityUpdate::Picture("picture".into())) + .await?; + + let image = account.identity_picture(&did).await?; + + assert_eq!(image, "picture"); + Ok(()) + } + + #[tokio::test] + async fn identity_profile_banner() -> anyhow::Result<()> { + let (mut account, did, _) = create_account( + Some("JohnDoe"), + None, + Some("test::identity_profile_banner".into()), + ) + .await?; + + account + .update_identity(IdentityUpdate::Banner("banner".into())) + .await?; + + let image = account.identity_banner(&did).await?; + + assert_eq!(image, "banner"); + Ok(()) + } + #[tokio::test] async fn get_identity_platform() -> anyhow::Result<()> { let accounts = create_accounts(vec![ From c537ce1fb668d8196b2eb51ac88f0a8a36f403a4 Mon Sep 17 00:00:00 2001 From: Darius Date: Wed, 11 Oct 2023 17:43:49 -0400 Subject: [PATCH 7/8] chore: Remove field --- extensions/warp-ipfs/src/store/identity.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/extensions/warp-ipfs/src/store/identity.rs b/extensions/warp-ipfs/src/store/identity.rs index 4164d351b..1fd4f92cf 100644 --- a/extensions/warp-ipfs/src/store/identity.rs +++ b/extensions/warp-ipfs/src/store/identity.rs @@ -69,8 +69,6 @@ pub struct IdentityStore { phonebook: PhoneBook, - wait_on_response: Option, - signal: Arc>>>>, discovery: Discovery, @@ -261,7 +259,6 @@ impl IdentityStore { let phonebook = 
PhoneBook::new(discovery.clone(), pb_tx); let signal = Default::default(); - let wait_on_response = config.store_setting.friend_request_response_duration; let store = Self { ipfs, @@ -276,7 +273,6 @@ impl IdentityStore { queue, phonebook, signal, - wait_on_response, }; if let Ok(ident) = store.own_identity().await { @@ -2228,7 +2224,11 @@ impl IdentityStore { let mut queued = false; - let wait = self.wait_on_response.is_some(); + let wait = self + .config + .store_setting + .friend_request_response_duration + .is_some(); let mut rx = (matches!(payload.event, Event::Request) && wait).then_some({ let (tx, rx) = oneshot::channel(); @@ -2258,7 +2258,7 @@ impl IdentityStore { if !queued && matches!(payload.event, Event::Request) { if let Some(rx) = std::mem::take(&mut rx) { - if let Some(timeout) = self.wait_on_response { + if let Some(timeout) = self.config.store_setting.friend_request_response_duration { let start = Instant::now(); if let Ok(Ok(res)) = tokio::time::timeout(timeout, rx).await { let end = start.elapsed(); From daa51cf34582180f4787f3466ff72764fced07aa Mon Sep 17 00:00:00 2001 From: Darius Date: Wed, 11 Oct 2023 18:35:57 -0400 Subject: [PATCH 8/8] chore: Correct limit in verify --- extensions/warp-ipfs/src/store/document/identity.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extensions/warp-ipfs/src/store/document/identity.rs b/extensions/warp-ipfs/src/store/document/identity.rs index 1105b7102..cf7824b6c 100644 --- a/extensions/warp-ipfs/src/store/document/identity.rs +++ b/extensions/warp-ipfs/src/store/document/identity.rs @@ -153,12 +153,12 @@ impl IdentityDocument { } if let Some(status) = &payload.status_message { - if status.len() > 256 { + if status.len() > 512 { return Err(Error::InvalidLength { context: "identity status message".into(), current: status.len(), minimum: None, - maximum: Some(256), + maximum: Some(512), }); } }